From 4fc7afbfaf35cc37a63b1e0683eaed65d7ef0ea4 Mon Sep 17 00:00:00 2001
From: eopXD
Date: Mon, 5 Jun 2023 01:39:21 -0700
Subject: [PATCH] [3/6][Clang][RISCV] Replace strided segment load with tuple
 type interfaces

Depends on D152135.

This is the 3rd commit of the patch-set.

This patch makes the strided segment load intrinsics use tuple types.

Reviewed By: rogfer01

Differential Revision: https://reviews.llvm.org/D152136
---
 clang/include/clang/Basic/riscv_vector.td    |   71 +-
 .../non-policy/non-overloaded/vlsseg2e16.c   |  420 ++--
 .../non-policy/non-overloaded/vlsseg2e32.c   |  336 ++-
 .../non-overloaded/vlsseg2e32_tuple.c        |  248 ---
 .../non-policy/non-overloaded/vlsseg2e64.c   |  252 +--
 .../non-policy/non-overloaded/vlsseg2e8.c    |  336 ++-
 .../non-policy/non-overloaded/vlsseg3e16.c   |  384 ++--
 .../non-policy/non-overloaded/vlsseg3e32.c   |  288 +--
 .../non-policy/non-overloaded/vlsseg3e64.c   |  192 +-
 .../non-policy/non-overloaded/vlsseg3e8.c    |  320 +--
 .../non-policy/non-overloaded/vlsseg4e16.c   |  432 ++--
 .../non-policy/non-overloaded/vlsseg4e32.c   |  324 +--
 .../non-policy/non-overloaded/vlsseg4e64.c   |  216 +-
 .../non-policy/non-overloaded/vlsseg4e8.c    |  360 +---
 .../non-policy/non-overloaded/vlsseg5e16.c   |  360 +---
 .../non-policy/non-overloaded/vlsseg5e32.c   |  240 +--
 .../non-policy/non-overloaded/vlsseg5e64.c   |  120 +-
 .../non-policy/non-overloaded/vlsseg5e8.c    |  320 +--
 .../non-policy/non-overloaded/vlsseg6e16.c   |  396 +--
 .../non-policy/non-overloaded/vlsseg6e32.c   |  264 +--
 .../non-policy/non-overloaded/vlsseg6e64.c   |  132 +-
 .../non-policy/non-overloaded/vlsseg6e8.c    |  352 +---
 .../non-policy/non-overloaded/vlsseg7e16.c   |  432 +--
 .../non-policy/non-overloaded/vlsseg7e32.c   |  288 +--
 .../non-policy/non-overloaded/vlsseg7e64.c   |  144 +-
 .../non-policy/non-overloaded/vlsseg7e8.c    |  384 +--
 .../non-policy/non-overloaded/vlsseg8e16.c   |  468 +---
 .../non-policy/non-overloaded/vlsseg8e32.c   |  312 +--
 .../non-policy/non-overloaded/vlsseg8e64.c   |  156 +-
 .../non-policy/non-overloaded/vlsseg8e8.c    |  416 +--
 .../non-policy/overloaded/vlsseg2e16.c       |  210 +-
 .../non-policy/overloaded/vlsseg2e32.c       |  168 +-
 .../non-policy/overloaded/vlsseg2e32_tuple.c |  128 --
 .../non-policy/overloaded/vlsseg2e64.c       |  126 +-
 .../non-policy/overloaded/vlsseg2e8.c        |  168 +-
 .../non-policy/overloaded/vlsseg3e16.c       |  192 +-
 .../non-policy/overloaded/vlsseg3e32.c       |  144 +-
 .../non-policy/overloaded/vlsseg3e64.c       |   96 +-
 .../non-policy/overloaded/vlsseg3e8.c        |  160 +-
 .../non-policy/overloaded/vlsseg4e16.c       |  216 +-
 .../non-policy/overloaded/vlsseg4e32.c       |  162 +-
 .../non-policy/overloaded/vlsseg4e64.c       |  108 +-
 .../non-policy/overloaded/vlsseg4e8.c        |  180 +-
 .../non-policy/overloaded/vlsseg5e16.c       |  180 +-
 .../non-policy/overloaded/vlsseg5e32.c       |  120 +-
 .../non-policy/overloaded/vlsseg5e64.c       |   60 +-
 .../non-policy/overloaded/vlsseg5e8.c        |  160 +-
 .../non-policy/overloaded/vlsseg6e16.c       |  198 +-
 .../non-policy/overloaded/vlsseg6e32.c       |  132 +-
 .../non-policy/overloaded/vlsseg6e64.c       |   66 +-
 .../non-policy/overloaded/vlsseg6e8.c        |  176 +-
 .../non-policy/overloaded/vlsseg7e16.c       |  216 +-
 .../non-policy/overloaded/vlsseg7e32.c       |  144 +-
 .../non-policy/overloaded/vlsseg7e64.c       |   72 +-
 .../non-policy/overloaded/vlsseg7e8.c        |  192 +-
 .../non-policy/overloaded/vlsseg8e16.c       |  234 +--
 .../non-policy/overloaded/vlsseg8e32.c       |  156 +-
 .../non-policy/overloaded/vlsseg8e64.c       |   78 +-
 .../non-policy/overloaded/vlsseg8e8.c        |  208 +-
 .../policy/non-overloaded/vlsseg2e16.c       | 1200 +++++------
 .../policy/non-overloaded/vlsseg2e32.c       |  960 ++++-----
 .../policy/non-overloaded/vlsseg2e32_tuple.c |  681 ------
 .../policy/non-overloaded/vlsseg2e64.c       |  720 +++----
 .../policy/non-overloaded/vlsseg2e8.c        |  963 ++++-----
 .../policy/non-overloaded/vlsseg3e16.c       | 1152 +++++-----
 .../policy/non-overloaded/vlsseg3e32.c       |  864 ++++----
 .../policy/non-overloaded/vlsseg3e64.c       |  576 ++---
 .../policy/non-overloaded/vlsseg3e8.c        |  963 ++++-----
 .../policy/non-overloaded/vlsseg4e16.c       | 1724 +++++++--------
 .../policy/non-overloaded/vlsseg4e32.c       | 1292 ++++++------
 .../policy/non-overloaded/vlsseg4e64.c       |  672 +++---
 .../policy/non-overloaded/vlsseg4e8.c        | 1439 ++++++-------
 .../policy/non-overloaded/vlsseg5e16.c       | 1436 ++++++-------
 .../policy/non-overloaded/vlsseg5e32.c       |  768 +++----
 .../policy/non-overloaded/vlsseg5e64.c       |  384 ++--
 .../policy/non-overloaded/vlsseg5e8.c        | 1279 +++++------
 .../policy/non-overloaded/vlsseg6e16.c       | 1580 +++++++-------
 .../policy/non-overloaded/vlsseg6e32.c       |  864 ++++----
 .../policy/non-overloaded/vlsseg6e64.c       |  432 ++--
 .../policy/non-overloaded/vlsseg6e8.c        | 1407 ++++++-------
 .../policy/non-overloaded/vlsseg7e16.c       | 1724 +++++++--------
 .../policy/non-overloaded/vlsseg7e32.c       |  960 ++++-----
 .../policy/non-overloaded/vlsseg7e64.c       |  480 ++---
 .../policy/non-overloaded/vlsseg7e8.c        | 1535 +++++++-------
 .../policy/non-overloaded/vlsseg8e16.c       | 1868 ++++++++---------
 .../policy/non-overloaded/vlsseg8e32.c       | 1056 +++++-----
 .../policy/non-overloaded/vlsseg8e64.c       |  528 ++---
 .../policy/non-overloaded/vlsseg8e8.c        | 1663 +++++++--------
 .../policy/overloaded/vlsseg2e16.c           | 1200 +++++------
 .../policy/overloaded/vlsseg2e32.c           |  960 ++++-----
 .../policy/overloaded/vlsseg2e32_tuple.c     |  680 ------
 .../policy/overloaded/vlsseg2e64.c           |  720 +++----
 .../policy/overloaded/vlsseg2e8.c            |  963 ++++-----
 .../policy/overloaded/vlsseg3e16.c           | 1152 +++++-----
 .../policy/overloaded/vlsseg3e32.c           |  864 ++++----
 .../policy/overloaded/vlsseg3e64.c           |  576 ++---
 .../policy/overloaded/vlsseg3e8.c            |  963 ++++-----
 .../policy/overloaded/vlsseg4e16.c           | 1724 +++++++--------
 .../policy/overloaded/vlsseg4e32.c           | 1292 ++++++------
 .../policy/overloaded/vlsseg4e64.c           |  672 +++---
 .../policy/overloaded/vlsseg4e8.c            | 1439 ++++++-------
 .../policy/overloaded/vlsseg5e16.c           | 1436 ++++++-------
 .../policy/overloaded/vlsseg5e32.c           |  768 +++----
 .../policy/overloaded/vlsseg5e64.c           |  384 ++--
 .../policy/overloaded/vlsseg5e8.c            | 1279 +++++------
 .../policy/overloaded/vlsseg6e16.c           | 1580 +++++++-------
 .../policy/overloaded/vlsseg6e32.c           |  864 ++++----
 .../policy/overloaded/vlsseg6e64.c           |  432 ++--
 .../policy/overloaded/vlsseg6e8.c            | 1407 ++++++-------
 .../policy/overloaded/vlsseg7e16.c           | 1724 +++++++--------
 .../policy/overloaded/vlsseg7e32.c           |  960 ++++-----
 .../policy/overloaded/vlsseg7e64.c           |  480 ++---
 .../policy/overloaded/vlsseg7e8.c            | 1535 +++++++-------
 .../policy/overloaded/vlsseg8e16.c           | 1868 ++++++++---------
 .../policy/overloaded/vlsseg8e32.c           | 1056 +++++-----
 .../policy/overloaded/vlsseg8e64.c           |  528 ++---
 .../policy/overloaded/vlsseg8e8.c            | 1663 +++++++--------
 117 files changed, 33887 insertions(+), 41865 deletions(-)
 delete mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32_tuple.c
 delete mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32_tuple.c
 delete mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32_tuple.c
 delete mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32_tuple.c

diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 878ca54fb36f..160d32084c2a 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -692,73 +692,6 @@ class PVString<int nf, bit signed> {
       !eq(nf, 8): !if(signed, "PvPvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUvPUv"));
 }
 
-multiclass RVVStridedSegLoad<string op> {
-  foreach type = TypeList in {
-    defvar eew = !cond(!eq(type, "c") : "8",
-                       !eq(type, "s") : "16",
-                       !eq(type, "i") : "32",
-                       !eq(type, "l") : "64",
-                       !eq(type, "x") : "16",
-                       !eq(type, "f") : "32",
-                       !eq(type, "d") : "64");
-    foreach nf = NFList in {
-      let Name = op # nf # "e" # eew # "_v",
-          IRName = op # nf,
-          MaskedIRName = op # nf # "_mask",
-          NF = nf,
-          ManualCodegen = [{
-    {
-      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
-      IntrinsicTypes = {ResultType, Ops.back()->getType()};
-      SmallVector<Value *, 12> Operands;
-
-      // Please refer to comment under 'defvar NFList' in this file
-      if ((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
-          (!IsMasked && PolicyAttrs & RVV_VTA))
-        Operands.append(NF, llvm::PoisonValue::get(ResultType));
-      else {
-        if (IsMasked)
-          Operands.append(Ops.begin() + NF + 1, Ops.begin() + 2 * NF + 1);
-        else // Unmasked
-          Operands.append(Ops.begin() + NF, Ops.begin() + 2 * NF);
-      }
-      unsigned PtrOperandIdx = IsMasked ?
-        ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ? NF + 1 : 2 * NF + 1 :
-        (PolicyAttrs & RVV_VTA) ? NF : 2 * NF;
-      Value *PtrOperand = Ops[PtrOperandIdx];
-      Value *StrideOperand = Ops[PtrOperandIdx + 1];
-      Value *VLOperand = Ops[PtrOperandIdx + 2];
-      Operands.push_back(PtrOperand);
-      Operands.push_back(StrideOperand);
-      if (IsMasked)
-        Operands.push_back(Ops[NF]);
-      Operands.push_back(VLOperand);
-      if (IsMasked)
-        Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
-
-      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
-      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
-      clang::CharUnits Align =
-          CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
-      llvm::Value *V;
-      for (unsigned I = 0; I < NF; ++I) {
-        llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {I});
-        V = Builder.CreateStore(Val, Address(Ops[I], Val->getType(), Align));
-      }
-      return V;
-    }
-          }] in {
-        defvar PV = PVString<nf, true>.S;
-        defvar PUV = PVString<nf, false>.S;
-        def : RVVBuiltin<"v", "0" # PV # "PCe" # "t", type>;
-        if !not(IsFloat<type>.val) then {
-          def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # "t", type>;
-        }
-      }
-    }
-  }
-}
-
 multiclass RVVIndexedSegLoad<string op> {
   foreach type = TypeList in {
     foreach eew_info = EEWList in {
@@ -1320,7 +1253,6 @@ defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
 
 // 7.8 Vector Load/Store Segment Instructions
 let UnMaskedPolicyScheme = HasPassthruOperand in {
-defm : RVVStridedSegLoad<"vlsseg">;
 defm : RVVIndexedSegLoad<"vluxseg">;
 defm : RVVIndexedSegLoad<"vloxseg">;
 }
@@ -1518,8 +1450,7 @@ multiclass RVVStridedSegLoadTuple<string op> {
                        !eq(type, "f") : "32",
                        !eq(type, "d") : "64");
     foreach nf = NFList in {
-      let Name = op # nf # "e" # eew # "_v_tuple",
-          OverloadedName = op # nf # "e" # eew # "_tuple",
+      let Name = op # nf # "e" # eew # "_v",
           IRName = op # nf,
           MaskedIRName = op # nf # "_mask",
           NF = nf,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c
index 3e5db78c4137..fd3cad742f33 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c
+++
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e16.c @@ -7,423 +7,303 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf4(v0, v1, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16mf4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf2(v0, v1, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void 
test_vlsseg2e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m1(v0, v1, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m2(v0, v1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16m4(vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m4(v0, v1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf4(v0, v1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf2(v0, v1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m1(v0, v1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( poison, poison, ptr [[BASE]], i64 
[[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m2(v0, v1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16m4(vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m4(v0, v1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf4(v0, v1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf2(v0, v1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m1(v0, v1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m2(v0, v1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16m4(vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m4(v0, v1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf4_m(v0, v1, mask, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16mf4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf2_m(v0, v1, mask, base, 
bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m1_m(v0, v1, mask, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m2_m(v0, v1, mask, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_f16m4_m(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m4_m(v0, v1, mask, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_m(vbool4_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf4_m(v0, v1, mask, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf2_m(v0, v1, mask, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m1_m(v0, v1, mask, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m2_m(v0, v1, mask, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_i16m4_m(vint16m4_t *v0, 
vint16m4_t *v1, vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m4_m(v0, v1, mask, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf4_m(v0, v1, mask, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf2_m(v0, v1, mask, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m1_m(v0, v1, mask, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m2_m(v0, v1, mask, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e16_v_u16m4_m(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m4_m(v0, v1, mask, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m4x2_m(mask, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c index 4e6647ea6dc7..e941c98d2a50 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32.c @@ -7,339 +7,243 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2(v0, v1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1(v0, v1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2(v0, v1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32m4(vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4(v0, v1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2(v0, v1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1(v0, v1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2(v0, v1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m4(vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4(v0, v1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , } @test_vlsseg2e32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2(v0, v1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1(v0, v1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2(v0, v1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4 -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32m4(vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4(v0, v1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_m(v0, v1, mask, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, 
vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1_m(v0, v1, mask, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_m(v0, v1, mask, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_f32m4_m(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_m(v0, v1, mask, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_m(v0, v1, mask, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_m(v0, v1, mask, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_m(v0, v1, mask, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg2e32_v_i32m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_i32m4_m(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_m(v0, v1, mask, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_m(v0, v1, mask, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// 
CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
 //
-void test_vlsseg2e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_u32m1_m(v0, v1, mask, base, bstride, vl);
+vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_v_u32m1x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_u32m2x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
 //
-void test_vlsseg2e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_u32m2_m(v0, v1, mask, base, bstride, vl);
+vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_v_u32m2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_u32m4x2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]]
 //
-void test_vlsseg2e32_v_u32m4_m(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_u32m4_m(v0, v1, mask, base, bstride, vl);
+vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_v_u32m4x2_m(mask, base, bstride, vl);
 }
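The hunks above show the caller-visible shape of the change for every lane type: the v0/v1 output pointers and the void return give way to a single x2 tuple return value. Below is a minimal caller-side sketch of the migration, not part of the patch itself; the __riscv_vget_v_i32m1x2_i32m1 unpacking step and __riscv_vadd_vv_i32m1 are assumptions borrowed from the wider tuple-type intrinsic work, not something these tests exercise:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Old interface (removed by this patch): both segment fields were written
   through output pointers:
     __riscv_vlsseg2e32_v_i32m1(&v0, &v1, base, bstride, vl);
   New interface: one tuple value carries both fields. */
vint32m1_t sum_segment_fields(const int32_t *base, ptrdiff_t bstride,
                              size_t vl) {
  vint32m1x2_t seg = __riscv_vlsseg2e32_v_i32m1x2(base, bstride, vl);
  /* Unpacking via vget is assumed from the tuple-type intrinsic work. */
  vint32m1_t s0 = __riscv_vget_v_i32m1x2_i32m1(seg, 0);
  vint32m1_t s1 = __riscv_vget_v_i32m1x2_i32m1(seg, 1);
  return __riscv_vadd_vv_i32m1(s0, s1, vl);
}

The vlsseg2e32_tuple.c file deleted below carried exactly this coverage under the interim _v_tuple_ intrinsic spelling; with the main test files migrated to tuple types, both the extra file and that spelling become redundant.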
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32_tuple.c
deleted file mode 100644
index abc3b2e7ec2a..000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e32_tuple.c
+++ /dev/null
@@ -1,248 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
-// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
-//
-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2(const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_tuple_f32mf2x2(base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg2e32_v_f32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]]
-//
-vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2(const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_tuple_f32m1x2(base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg2e32_v_f32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]]
-//
-vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2(const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_tuple_f32m2x2(base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x float>, <vscale x 8 x float> } @test_vlsseg2e32_v_f32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]]
-//
-vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2(const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_tuple_f32m4x2(base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_i32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
-//
-vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_tuple_i32mf2x2(base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_i32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64
[[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg2e32_v_tuple_u32m4x2(base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2_m(mask, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: ret { , } [[TMP0]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m4x2_m(mask, base, bstride, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c index 0efaf5e21bb0..d0a9bfbae197 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg2e64.c @@ -7,255 +7,183 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1(v0, v1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2(v0, v1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr 
noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m4(vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4(v0, v1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1(v0, v1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2(v0, v1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg2e64_v_i64m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m4(vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4(v0, v1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1(v0, v1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg2e64_v_u64m2(v0, v1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_u64m4(vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4(v0, v1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1_m(v0, v1, mask, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2_m(v0, v1, mask, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4_m(v0, v1, mask, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_m(vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1_m(v0, v1, mask, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2_m(v0, v1, mask, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m2x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4_m(v0, v1, mask, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg2e64_v_u64m1_m(v0, v1, mask, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m1x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
 //
-void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m2_m(v0, v1, mask, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]]
 //
-void test_vlsseg2e64_v_u64m4_m(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m4_m(v0, v1, mask, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m4x2_m(mask, base, bstride, vl);
 }
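The masked forms migrate the same way: the destination pointers drop out while the mask stays the leading argument, and the trailing i64 3 in the CHECK lines is the tail-agnostic/mask-agnostic policy that these non-policy intrinsics default to. A short sketch of a caller, using only the intrinsic tested above:

#include <riscv_vector.h>
#include <stddef.h>

/* Masked strided segment load with the tuple interface: both fields come
   back in one vfloat64m1x2_t; inactive lanes follow the default agnostic
   policy (the implicit trailing `i64 3` operand visible in the IR above). */
vfloat64m1x2_t load_f64_pairs_masked(vbool64_t mask, const double *base,
                                     ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e64_v_f64m1x2_m(mask, base, bstride, vl);
}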
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg2e8_v_i8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf8(v0, v1, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf8x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg2e8_v_i8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf4(v0, v1, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg2e8_v_i8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg2.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf2(v0, v1, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg2e8_v_i8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg2.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m1(v0, v1, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m1x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg2e8_v_i8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg2.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m2(v0, v1, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 32 x i8>, <vscale x 32 x i8> } @test_vlsseg2e8_v_i8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8m4(vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m4(v0, v1, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg2e8_v_u8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf8(v0, v1, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf8x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg2e8_v_u8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf4(v0, v1, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg2e8_v_u8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg2.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf2(v0, v1, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg2e8_v_u8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg2.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m1(v0, v1, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m1x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg2e8_v_u8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg2.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m2(v0, v1, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m2x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 32 x i8>, <vscale x 32 x i8> } @test_vlsseg2e8_v_u8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8m4(vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m4(v0, v1, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m4x2(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg2e8_v_i8mf8x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf8_m(v0, v1, mask, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf8x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg2e8_v_i8mf4x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf4_m(v0, v1, mask, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf4x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg2e8_v_i8mf2x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg2.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf2_m(v0, v1, mask, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e8_v_i8mf2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg2e8_v_i8m1x2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg2.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m1_m(v0, v1, mask, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m1x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg2e8_v_i8m2x2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg2.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m2_m(v0, v1, mask, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e8_v_i8m2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 32 x i8>, <vscale x 32 x i8> } @test_vlsseg2e8_v_i8m4x2_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m4_m(v0, v1, mask, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e8_v_i8m4x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg2e8_v_u8mf8x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf8_m(v0, v1, mask, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e8_v_u8mf8x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg2e8_v_u8mf4x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf4_m(v0, v1, mask, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e8_v_u8mf4x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg2e8_v_u8mf2x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg2.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf2_m(v0, v1, mask, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e8_v_u8mf2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg2e8_v_u8m1x2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg2.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m1_m(v0, v1, mask, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e8_v_u8m1x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg2e8_v_u8m2x2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg2.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m2_m(v0, v1, mask, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e8_v_u8m2x2_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 32 x i8>, <vscale x 32 x i8> } @test_vlsseg2e8_v_u8m4x2_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m4_m(v0, v1, mask, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e8_v_u8m4x2_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c
index 1db9cf1205e1..74960833e5e3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e16.c
@@ -7,387 +7,243 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg3e16_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf4(v0, v1, v2, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf4x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg3e16_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf2(v0, v1, v2, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg3e16_v_f16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m1(v0, v1, v2, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m1x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg3e16_v_f16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m2(v0, v1, v2, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_i16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf4(v0, v1, v2, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf4x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_i16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf2(v0, v1, v2, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_i16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m1(v0, v1, v2, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m1x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_i16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m2(v0, v1, v2, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf4(v0, v1, v2, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf4x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf2(v0, v1, v2, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_u16mf2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m1(v0, v1, v2, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_u16m1x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m2(v0, v1, v2, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e16_v_u16m2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg3e16_v_f16mf4x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf4_m(v0, v1, v2, mask, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf4x3_m(mask, base, bstride, vl);
__riscv_vlsseg3e16_v_f16mf4x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf2_m(v0, v1, v2, mask, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m1_m(v0, v1, v2, mask, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m2_m(v0, v1, v2, mask, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf4_m(v0, v1, v2, mask, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf4x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf2_m(v0, v1, v2, mask, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m1_m(v0, v1, v2, mask, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m2_m(v0, v1, v2, mask, base, bstride, vl); 
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf4_m(v0, v1, v2, mask, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf4x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf2_m(v0, v1, v2, mask, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_u16m1x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m1_m(v0, v1, v2, mask, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m1x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_u16m2x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m2_m(v0, v1, v2, mask, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m2x3_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c
index ece187e9dfeb..c21482f5445c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e32.c
@@ -7,291 +7,183 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define
dso_local { , , } @test_vlsseg3e32_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32mf2(v0, v1, v2, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32mf2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m1(v0, v1, v2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m2(v0, v1, v2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32mf2(v0, v1, v2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32mf2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m1(v0, v1, v2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m2(v0, v1, v2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32mf2(v0, v1, v2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32mf2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
, } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m1(v0, v1, v2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m2(v0, v1, v2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32mf2_m(v0, v1, v2, mask, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_m -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m1_m(v0, v1, v2, mask, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m2_m(v0, v1, v2, mask, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32mf2_m(v0, v1, v2, mask, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m1_m(v0, v1, v2, mask, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32m2_m(v0, v1, v2, mask, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32mf2_m(v0, v1, v2, mask, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32mf2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m1_m(v0, v1, v2, mask, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m1x3_m(mask, 
base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_u32m2x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
 //
-void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m2_m(v0, v1, v2, mask, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m2x3_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c
index 7759d088a4f5..04a48f93ce7c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e64.c
@@ -7,195 +7,123 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg3e64_v_f64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg3.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg3e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m1(v0, v1, v2, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3(const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m1x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2
-//
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2(v0, v1, v2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1(v0, v1, v2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m2(v0, v1, v2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m1(v0, v1, v2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m1x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m2(v0, v1, v2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m2x3(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local 
void @test_vlsseg3e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1_m(v0, v1, v2, mask, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2_m(v0, v1, v2, mask, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1_m(v0, v1, v2, mask, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m2_m(v0, v1, v2, mask, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m2x3_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m1_m(v0, v1, v2, mask, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m1x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_u64m2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
 //
-void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m2_m(v0, v1, v2, mask, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m2x3_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c
index 2d6a2be8dff5..3449c4b577c3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg3e8.c
@@ -6,323 +6,203 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_i8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-//
CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf8(v0, v1, v2, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf8x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_i8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf4(v0, v1, v2, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf4x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_i8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf2(v0, v1, v2, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_i8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m1(v0, v1, v2, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m1x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_i8m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m2(v0, v1, v2, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_u8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf8(v0, v1, v2, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf8x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_u8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf4(v0, v1, v2, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf4x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_u8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf2(v0, v1, v2, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_u8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m1(v0, v1, v2, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m1x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_u8m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m2(v0, v1, v2, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m2x3(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_i8mf8x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf8_m(v0, v1, v2, mask, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf8x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_i8mf4x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf4_m(v0, v1, v2, mask, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf4x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_i8mf2x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf2_m(v0, v1, v2, mask, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf2x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_i8m1x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m1_m(v0, v1, v2, mask, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m1x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_i8m2x3_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m2_m(v0, v1, v2, mask, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m2x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_u8mf8x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf8_m(v0, v1, v2, mask, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf8x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_u8mf4x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf4_m(v0, v1, v2, mask, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf4x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_u8mf2x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf2_m(v0, v1, v2, mask, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf2x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_u8m1x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m1_m(v0, v1, v2, mask, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m1x3_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_u8m2x3_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m2_m(v0, v1, v2, mask, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m2x3_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c
index c53b891e7466..1828de358404 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e16.c
@@ -7,435 +7,243 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg4e16_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4(v0, v1, v2, v3, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf4x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg4e16_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf2(v0, v1, v2, v3, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf2x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg4e16_v_f16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m1(v0, v1, v2, v3, base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16m1x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg4e16_v_f16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16m2(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m2(v0, v1, v2, v3, base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16m2x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_i16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf4(v0, v1, v2, v3, base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16mf4x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_i16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf2(v0, v1, v2, v3, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16mf2x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_i16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m1(v0, v1, v2, v3, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16m1x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_i16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16m2(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m2(v0, v1, v2, v3, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16m2x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_u16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf4(v0, v1, v2, v3, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16mf4x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_u16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf2(v0, v1, v2, v3, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16mf2x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_u16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m1(v0, v1, v2, v3, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16m1x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_u16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16m2(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m2(v0, v1, v2, v3, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16m2x4(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg4e16_v_f16mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf4x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg4e16_v_f16mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf2x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg4e16_v_f16m1x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e16_v_f16m1x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg4e16_v_f16m2x4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16m2x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_i16mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16mf4x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_i16mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e16_v_i16mf2x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_i16m1x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e16_v_i16m1x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_i16m2x4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e16_v_i16m2x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_u16mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e16_v_u16mf4x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_u16mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e16_v_u16mf2x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_u16m1x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e16_v_u16m1x4_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_u16m2x4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e16_v_u16m2x4_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c
index e863b1094ab2..87fac712d7cc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e32.c
@@ -7,327 +7,183 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg4e32_v_f32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
 //
test_vlsseg4e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32mf2(v0, v1, v2, v3, base, bstride, vl); +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32mf2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m1(v0, v1, v2, v3, base, bstride, vl); +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_f32m2(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m2(v0, v1, v2, v3, base, bstride, vl); +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4(const float *base, ptrdiff_t bstride, 
size_t vl) { + return __riscv_vlsseg4e32_v_f32m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32mf2(v0, v1, v2, v3, base, bstride, vl); +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32mf2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m1(v0, v1, v2, v3, base, bstride, vl); +vint32m1x4_t test_vlsseg4e32_v_i32m1x4(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_i32m2(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m2(v0, v1, v2, v3, base, bstride, vl); +vint32m2x4_t test_vlsseg4e32_v_i32m2x4(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32mf2(v0, v1, v2, v3, base, bstride, vl); +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32mf2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m1(v0, v1, v2, v3, base, bstride, vl); +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_u32m2(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m2(v0, v1, v2, v3, base, bstride, vl); +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m2x4_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c index 6f0b0992f8e5..d6b7f483ee44 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e64.c @@ -7,219 +7,123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m1(v0, v1, v2, v3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], 
align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_f64m2(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m2(v0, v1, v2, v3, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m1(v0, v1, v2, v3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: 
ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_i64m2(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m2(v0, v1, v2, v3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m1(v0, v1, v2, v3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_u64m2(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m2(v0, v1, v2, v3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4(const uint64_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m2x4_m(mask, base, bstride, vl); } 
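// EDITOR'S NOTE: a minimal usage sketch (not part of the autogenerated test
// content above) contrasting the two interfaces this diff exercises. Before
// this patch, a strided segment load wrote each segment through a separate
// output pointer; with tuple types, one call returns a single value holding
// all four segments. The helper `process` and the function `demo` are
// illustrative names only, and the sketch assumes the tuple `vget` intrinsic
// (__riscv_vget_v_f64m1x4_f64m1) from the companion patches in this series
// is available.

#include <riscv_vector.h>

void process(vfloat64m1_t seg, size_t vl);

void demo(const double *base, ptrdiff_t bstride, size_t vl) {
  // New tuple-returning form: one call yields all four segments.
  vfloat64m1x4_t v = __riscv_vlsseg4e64_v_f64m1x4(base, bstride, vl);
  // Individual segments are extracted with vget and a constant index (0..3),
  // replacing the old v0..v3 output pointers.
  process(__riscv_vget_v_f64m1x4_f64m1(v, 0), vl);
  process(__riscv_vget_v_f64m1x4_f64m1(v, 3), vl);
}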
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg4e64_v_i64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg4.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m1x4_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg4e64_v_i64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
 //
-void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m2x4_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg4e64_v_u64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg4.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m1x4_m(mask, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg4e64_v_u64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
 //
-void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m2x4_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c
index 5d1898a22a39..7415b0f60c63 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg4e8.c @@ -6,363 +6,203 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8(v0, v1, v2, v3, base, bstride, vl); +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4(v0, v1, v2, v3, base, bstride, vl); +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg4e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2(v0, v1, v2, v3, base, bstride, vl); +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1(v0, v1, v2, v3, base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m2(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2(v0, v1, v2, v3, base, bstride, vl); +vint8m2x4_t test_vlsseg4e8_v_i8m2x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8(v0, v1, v2, v3, base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4(v0, v1, v2, v3, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2(v0, v1, v2, v3, base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1(v0, v1, v2, v3, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8m2(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2(v0, v1, v2, v3, base, bstride, vl); +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t 
mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, base, 
bstride, vl); +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg4e8_v_i8m2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg4e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr 
noundef [[V3:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8m2x4_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c
index 0ecae0b82edb..bafd3787848f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e16.c
@@ -7,363 +7,183 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg5e16_v_f16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg5e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3,
vfloat16mf4_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } 
[[TMP0]] // -void test_vlsseg5e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1(v0, v1, v2, v3, v4, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1(v0, v1, v2, v3, v4, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg5e16_v_u16mf4x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c
index 079373ece334..41b5a87c38d0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e32.c
@@ -7,243 +7,123 @@

#include

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
//
-void test_vlsseg5e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, bstride, vl);
+vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5(const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_f32mf2x5(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { ,
, , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1(v0, v1, v2, v3, v4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1(v0, v1, v2, v3, v4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], 
ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
//
-void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint32m1x5_t test_vlsseg5e32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_i32m1x5_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
//
-void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_u32mf2x5_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
//
-void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e32_v_u32m1x5_m(mask, base, bstride, vl);
}
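[Editorial note, not part of the patch] The masked _m variants above follow the same pattern: the mask stays the first argument, the output pointers are gone, and the intrinsic returns the tuple directly. The trailing "i64 3" operand visible in the CHECK lines is the tail/mask policy (tail agnostic, mask agnostic), which is what the plain _m intrinsics request. A hedged sketch of the masked form, again assuming the tuple accessors from the earlier patches in this series are available:

#include <riscv_vector.h>

/* Masked strided segment load: inactive lanes are agnostic here, so the
 * caller must not rely on their values. The policy variants (_tu, _tum,
 * _tumu, handled by the policy side of this patch set) are the place to
 * go when defined tail or masked-off values are needed. */
vfloat32m1_t masked_second_field(vbool32_t mask, const float *base,
                                 ptrdiff_t bstride, size_t vl) {
  vfloat32m1x5_t rec = __riscv_vlsseg5e32_v_f32m1x5_m(mask, base, bstride, vl);
  return __riscv_vget_v_f32m1x5_f32m1(rec, 1);
}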
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c
index 0319cc582dd0..1e0a16bd5c60 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e64.c
@@ -7,123 +7,63 @@

#include

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-//
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1(v0, v1, v2, v3, v4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
//
-void test_vlsseg5e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e64_v_u64m1(v0, v1, v2, v3, v4, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e64_v_u64m1x5(base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
//
-void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e64_v_f64m1x5_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
//
-void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint64m1x5_t test_vlsseg5e64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e64_v_i64m1x5_m(mask, base, bstride, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
//
-void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e64_v_u64m1x5_m(mask, base, bstride, vl);
}
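[Editorial note, not part of the patch] For callers migrating across this change, the mechanical rewrite is: drop the five output-pointer arguments, rename the intrinsic to its x5 form, and take the tuple as the return value. A before/after sketch, with the old and new signatures taken from the hunks above and only the vget accessor assumed from the earlier tuple-type patches:

#include <riscv_vector.h>

void migrate_example(const int64_t *base, ptrdiff_t bstride, size_t vl) {
  /* Old interface (removed by this patch):
   *   vint64m1_t v0, v1, v2, v3, v4;
   *   __riscv_vlsseg5e64_v_i64m1(&v0, &v1, &v2, &v3, &v4, base, bstride, vl);
   */
  /* New tuple interface: */
  vint64m1x5_t t = __riscv_vlsseg5e64_v_i64m1x5(base, bstride, vl);
  vint64m1_t v0 = __riscv_vget_v_i64m1x5_i64m1(t, 0); /* field 0 of 5 */
  (void)v0; /* illustrative only */
}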
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c
index 7284e6d3ae09..4f1546edb3d3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg5e8.c
@@ -6,323 +6,163 @@
#include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8(v0, v1, v2, v3, v4, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5(const int8_t *base, ptrdiff_t bstride, 
size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1(v0, v1, v2, v3, v4, base, bstride, vl); +vint8m1x5_t 
test_vlsseg5e8_v_i8m1x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8(v0, v1, v2, v3, v4, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg5e8_v_u8mf4(v0, v1, v2, v3, v4, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2(v0, v1, v2, v3, v4, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, 
const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1(v0, v1, v2, v3, v4, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]] // -void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg5.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8_v_u8m1x5_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c
index 3a0944ba9cd0..92c4b670c8c6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e16.c
@@ -7,399 +7,183 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg6e16_v_f16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg6.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg6e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t
*v4, vfloat16mf4_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], 
i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr 
noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t 
*v5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4_m(v0, v1, v2, 
v3, v4, v5, mask, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1_m(v0, v1, v2, 
v3, v4, v5, mask, base, bstride, vl);
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16m1x6_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c
index c87be4c34232..836341ef0d35 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e32.c
@@ -7,267 +7,123 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg6e32_v_f32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg6.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
 //
-void test_vlsseg6e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl);
+vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6(const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32mf2x6(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg6e32_v_f32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg6.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-//
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2(v0, v1, v2, v3, v4, v5, 
base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
 //
-void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32mf2x6_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg6e32_v_f32m1x6_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg6.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]]
 //
-void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32m1x6_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg6e32_v_i32mf2x6_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg6.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
 //
-void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32mf2x6_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg6e32_v_i32m1x6_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg6.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
 //
-void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vint32m1x6_t test_vlsseg6e32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32m1x6_m(mask, base, bstride, vl);
 }
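The hunks above and below swap the six-output-pointer convention for a single tuple return value. Purely as an illustration of the caller-side difference, here is a minimal sketch in C; the helper name masked_load_first_field and the tuple accessor __riscv_vget_v_i32m1x6_i32m1 (from the companion vget/vset tuple patches in this series) are assumptions, not part of this diff:

  #include <riscv_vector.h>

  // Masked form of the new interface: one tuple result instead of six
  // vint32m1_t* output parameters; the mask now leads the argument list.
  static vint32m1_t masked_load_first_field(vbool32_t mask, const int32_t *base,
                                            ptrdiff_t bstride, size_t vl) {
    vint32m1x6_t seg = __riscv_vlsseg6e32_v_i32m1x6_m(mask, base, bstride, vl);
    // Field 0 of the 6-field tuple; fields 1..5 are reached the same way.
    return __riscv_vget_v_i32m1x6_i32m1(seg, 0);
  }
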
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg6e32_v_u32mf2x6_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg6.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
 //
-void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32mf2x6_m(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg6e32_v_u32m1x6_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg6.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
 //
-void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32m1x6_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c
index fedb3f3bed95..ce97076f85be 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e64.c
@@ -7,135 +7,63 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg6e64_v_f64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg6e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6(const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_v_f64m1x6(base, bstride, vl);
 }
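The unmasked e64 form above follows the same shape at a different element width. A hedged caller-side sketch in C; the helper name load_first_field and the accessor __riscv_vget_v_f64m1x6_f64m1 are assumed to come from the companion tuple-support patches, not from this diff, and the trailing i64 3 policy operand visible in the masked CHECK lines is the tail- and mask-agnostic default of the non-policy API:

  #include <riscv_vector.h>

  // Unmasked e64 variant: the whole segment group comes back as one
  // vfloat64m1x6_t tuple value instead of six vfloat64m1_t* outputs.
  static vfloat64m1_t load_first_field(const double *base, ptrdiff_t bstride,
                                       size_t vl) {
    vfloat64m1x6_t seg = __riscv_vlsseg6e64_v_f64m1x6(base, bstride, vl);
    return __riscv_vget_v_f64m1x6_f64m1(seg, 0);  // assumed tuple accessor
  }
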
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_i64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg6e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_i64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6(const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_v_i64m1x6(base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg6e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return
__riscv_vlsseg6e64_v_u64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_u64m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_u64m1x6_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c index 6663d0852909..cec568fd0b1d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg6e8.c @@ -6,355 +6,163 @@ #include -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg6e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , 
} [[TMP0]] // -void test_vlsseg6e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vint8m1x6_t test_vlsseg6e8_v_i8m1x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( poison, poison, poison, poison, poison, 
poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf2x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef 
[[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, 
vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_i8m1x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf2x6_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c index 41992e6e6918..e227bf671bbe 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e16.c @@ -7,435 +7,183 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7(const 
_Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint16m1x7_t test_vlsseg7e16_v_i16m1x7(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( poison, poison, poison, poison, poison, poison, 
poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1(v0, 
v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat16m1x7_t 
test_vlsseg7e16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg7e16_v_i16m1x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , 
} [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_m(mask, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c index ac3eb27c9152..74ffc5240ab9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e32.c @@ -7,291 +7,123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7(const int32_t *base, ptrdiff_t bstride, size_t vl) { + 
return __riscv_vlsseg7e32_v_i32mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], 
ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_m 
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg7.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
 //
-void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32m1x7_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_u32mf2x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg7.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
 //
-void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32mf2x7_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_u32m1x7_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg7.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
 //
-void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32m1x7_m(mask, base, bstride, vl);
 }
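The shape of the change is the same in every regenerated test above and below: the old intrinsics returned void and scattered the seven segments through seven output pointers, while the tuple-type intrinsics return a single value (vint32m1x7_t and friends) holding all seven fields. As a caller-side sketch, assuming the tuple accessor __riscv_vget_v_i32m1x7_i32m1 introduced earlier in this patch-set (the wrapper name first_field is illustrative, not part of this patch):

    #include <riscv_vector.h>

    // Hypothetical caller, not from this patch: strided segment load of a
    // seven-field record of 32-bit ints, then extraction of field 0. With
    // the old interface this took seven vint32m1_t out-pointers instead.
    vint32m1_t first_field(const int32_t *base, ptrdiff_t bstride, size_t vl) {
      vint32m1x7_t rec = __riscv_vlsseg7e32_v_i32m1x7(base, bstride, vl);
      return __riscv_vget_v_i32m1x7_i32m1(rec, 0);
    }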
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c
index 3ca3fa451288..a1b8f184c049 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e64.c
@@ -7,147 +7,63 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg7e64_v_f64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg7.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg7e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7(const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64_v_f64m1x7(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg7e64_v_i64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg7e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7(const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64_v_i64m1x7(base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg7e64_v_u64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg7e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64_v_u64m1x7(base, bstride, vl);
 }
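The masked variants below follow the same pattern, with one further simplification: once the output pointers are gone, the mask becomes the leading parameter, and the checks still lower to the .mask. intrinsic with policy operand i64 3 (tail- and mask-agnostic). A minimal sketch of the resulting call shape, assuming the masked intrinsic name as regenerated in this patch (the wrapper load_masked_f64 is illustrative):

    #include <riscv_vector.h>

    // Hypothetical wrapper, not from the patch: masked strided segment load
    // returning all seven f64 fields as one tuple; elements whose mask bit
    // is clear are agnostic under the TAMA policy shown in the checks.
    vfloat64m1x7_t load_masked_f64(vbool64_t mask, const double *base,
                                   ptrdiff_t bstride, size_t vl) {
      return __riscv_vlsseg7e64_v_f64m1x7_m(mask, base, bstride, vl);
    }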
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg7e64_v_f64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg7.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64_v_f64m1x7_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg7e64_v_i64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64_v_i64m1x7_m(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg7e64_v_u64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg7.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64_v_u64m1x7_m(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c
index 45cd9cec2fd3..1bdbe07672e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg7e8.c
@@ -6,387 +6,163 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]],
ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlsseg7.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg7e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr 
noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 
1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , 
, , , , } @test_vlsseg7e8_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8m1x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { 
, , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c index 53718c7d5935..9f21e74fff69 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e16.c @@ -7,471 +7,183 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16mf4(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16mf2(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], 
align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16m1(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8(const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16mf4(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint16mf4x8_t 
test_vlsseg8e16_v_i16mf4x8(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16mf2(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16m1(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8(const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf4(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, 
vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf2(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, 
poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16m1(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8(const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, 
poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c index 
bbd02c8eb54e..5465dc60df6d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e32.c @@ -7,315 +7,123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32mf2(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32m1(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8(const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// 
CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32mf2(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32m1(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8(const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32mf2(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32m1(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8(const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef 
[[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], 
align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( 
poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c index a9419e626947..c6bdb8d13d99 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e64.c @@ -7,159 +7,63 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , ,
, , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e64_v_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8(const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e64_v_i64m1(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8(const int64_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg8e64_v_i64m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e64_v_u64m1(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8(const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, 
vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_m(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c index 7fa3492d12c4..7afb166ffcae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlsseg8e8.c @@ -6,419 +6,163 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8 -// CHECK-RV64-SAME: (ptr
noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8mf8(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8mf4(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8mf2(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8(const int8_t *base, ptrdiff_t bstride, 
size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8m1(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8(const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_u8mf8(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_u8mf4(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg8e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_u8mf2(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_u8m1(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8(base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void 
test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8_m(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8m1x8_m(mask, base, bstride, vl);
 }

[The u8mf8x8 (nxv1i8, vbool64_t), u8mf4x8 (nxv2i8, vbool32_t), u8mf2x8 (nxv4i8, vbool16_t), and u8m1x8 (nxv8i8, vbool8_t) hunks that close out this file apply the same autogenerated CHECK-line rewrite; the test-function changes are:]

-void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf8x8_m(mask, base, bstride, vl);
 }
-void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf4x8_m(mask, base, bstride, vl);
 }
-void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf2x8_m(mask, base, bstride, vl);
 }
-void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8m1x8_m(mask, base, bstride, vl);
 }
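For readers tracking the interface change at the call site: the masked seg8 loads above stop writing eight vectors through output pointers and instead return one vint8m1x8_t/vuint8m1x8_t tuple. A minimal caller-side sketch, assuming the tuple-extract (__riscv_vget) and unit-stride store (__riscv_vse8) intrinsics from the same intrinsics revision; the helper name and its use are illustrative, not part of this patch:

#include <riscv_vector.h>

/* Gather field 0 of an 8-field record array into a packed buffer.
   Before this patch the load took &v0..&v7; now it returns a tuple. */
static void gather_field0(int8_t *dst, const int8_t *base, ptrdiff_t bstride,
                          vbool8_t mask, size_t vl) {
  vint8m1x8_t rec = __riscv_vlsseg8e8_v_i8m1x8_m(mask, base, bstride, vl);
  vint8m1_t f0 = __riscv_vget_v_i8m1x8_i8m1(rec, 0);  /* segment 0 */
  __riscv_vse8_v_i8m1(dst, f0, vl);
}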
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c
index 12e94a4a7b42..28a63b8f1ce4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e16.c
@@ -7,213 +7,153 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg2e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(v0, v1, mask, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16(mask, base, bstride, vl);
 }

[The remaining 14 hunks rewrite f16mf2x2 (nxv2f16), f16m1x2 (nxv4f16), f16m2x2 (nxv8f16), f16m4x2 (nxv16f16), i16mf4x2..i16m4x2 (nxv1i16..nxv16i16), and u16mf4x2..u16m4x2 (nxv1i16..nxv16i16) identically: each test drops its two output pointers, returns the corresponding tuple type, and keeps the overloaded call __riscv_vlsseg2e16(mask, base, bstride, vl).]
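The overloaded spelling tested above now infers the tuple variant from context. A usage sketch under the same assumptions as the previous example (the __riscv_vget tuple accessors come from the matching intrinsics revision; the deinterleave framing is illustrative):

#include <riscv_vector.h>

/* Split interleaved {re, im} _Float16 pairs, bstride bytes apart, into
   separate registers; one overloaded call replaces two output pointers. */
static void split_f16_pairs(vfloat16m1_t *re, vfloat16m1_t *im, vbool16_t mask,
                            const _Float16 *base, ptrdiff_t bstride, size_t vl) {
  vfloat16m1x2_t t = __riscv_vlsseg2e16(mask, base, bstride, vl);
  *re = __riscv_vget_v_f16m1x2_f16m1(t, 0);  /* segment 0: real parts */
  *im = __riscv_vget_v_f16m1x2_f16m1(t, 1);  /* segment 1: imaginary parts */
}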
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c
index f404a2b01e9a..eafc8ebb8fe5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32.c
@@ -7,171 +7,123 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
 //
-void test_vlsseg2e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(v0, v1, mask, base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32(mask, base, bstride, vl);
 }

[The remaining 11 hunks rewrite f32m1x2 (nxv2f32), f32m2x2 (nxv4f32), f32m4x2 (nxv8f32), i32mf2x2..i32m4x2 (nxv1i32..nxv8i32), and u32mf2x2..u32m4x2 (nxv1i32..nxv8i32) identically, each returning its tuple type from __riscv_vlsseg2e32(mask, base, bstride, vl).]
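The deletion that follows is the counterpart of the rewrite above: once the plain overloaded name returns a tuple, the dedicated _tuple spelling that file tested is redundant. Side by side (both calls are taken from tests in this patch; the wrapper is illustrative):

#include <riscv_vector.h>

static vfloat32mf2x2_t both_spellings(vbool64_t mask, const float *base,
                                      ptrdiff_t bstride, size_t vl) {
  /* Old dedicated spelling, exercised by the file deleted below:  */
  /*   return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);   */
  /* Surviving spelling, exercised by the updated file above:      */
  return __riscv_vlsseg2e32(mask, base, bstride, vl);
}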
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32_tuple.c
deleted file mode 100644
index de35620d0947..000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e32_tuple.c
+++ /dev/null
@@ -1,128 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
-// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
-//
-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple(mask, base, bstride, vl);
-}

[The other 11 deleted tests, f32m1x2 through u32m4x2, are identical apart from their types, and all call __riscv_vlsseg2e32_tuple(mask, base, bstride, vl); their coverage now lives in the updated vlsseg2e32.c above.]
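The e64 file below follows the same pattern. One point worth noting when reading its mask types: vboolN_t encodes N = SEW/LMUL, so at SEW=64 even the LMUL=1 tests use vbool64_t. A minimal sketch (the call is taken from the tests below; the wrapper name is illustrative):

#include <riscv_vector.h>

/* SEW=64, LMUL=1 gives SEW/LMUL = 64, hence vbool64_t for f64m1x2. */
static vfloat64m1x2_t load_f64_pair(vbool64_t mask, const double *base,
                                    ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg2e64(mask, base, bstride, vl);
}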
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_f64m4_m(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_m(vbool16_t mask, const double *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 
1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_i64m4_m(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64(v0, v1, mask, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c
index bf1ef87fcd76..1d6ad0ec9f7c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg2e8.c
@@ -6,171 +6,123 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg2e8_v_i8mf8x2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg2e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg2e8_v_i8mf4x2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8>
[[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_i8m4_m(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t 
mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , } [[TMP0]] // -void test_vlsseg2e8_v_u8m4_m(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8(v0, v1, mask, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c index 9709051ba5d0..7e4d01a8ef68 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e16.c @@ -7,195 +7,123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, 
vint16mf4_t *v2, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, const uint16_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16(v0, v1, v2, mask, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c index 89caea1be9a3..c34b3539c856 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e32.c @@ -7,147 +7,93 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , , } @test_vlsseg3e32_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , } [[TMP0]] // -void test_vlsseg3e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32(v0, v1, v2, mask, base, bstride, vl); 
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c
index 0ecc6ba8dcb3..8e1c2c3c51f8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e64.c
@@ -7,99 +7,63 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg3e64_v_f64m1x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg3.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg3e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg3e64_v_f64m2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg3.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]]
 //
-void test_vlsseg3e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg3e64_v_i64m1x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg3e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_i64m2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
 //
-void test_vlsseg3e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg3e64_v_u64m1x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg3e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
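 
// NOTE (illustrative sketch, not part of the generated checks): with
// three-field segments the masked overload returns one vuint64m1x3_t, and
// each field is recovered with the explicit vget tuple form, which this
// patch series assumes is available alongside the tuple-returning loads:
//   vuint64m1x3_t t = __riscv_vlsseg3e64(mask, base, bstride, vl);
//   vuint64m1_t f0 = __riscv_vget_v_u64m1x3_u64m1(t, 0);
//   vuint64m1_t f1 = __riscv_vget_v_u64m1x3_u64m1(t, 1);
//   vuint64m1_t f2 = __riscv_vget_v_u64m1x3_u64m1(t, 2);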
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_u64m2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
 //
-void test_vlsseg3e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(v0, v1, v2, mask, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c
index 2a829be4d529..a33b1336029d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg3e8.c
@@ -6,163 +6,103 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_i8mf8x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 //
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_i8mf4x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_i8mf2x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_i8m1x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_i8m2x3_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
  +  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_u8mf8x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_u8mf4x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  +  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_u8mf2x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  +  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_u8m1x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  +  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_u8m2x3_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]]
 //
-void test_vlsseg3e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8(v0, v1, v2, mask, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c
index 389dfd33b208..fdadc5542ee5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e16.c
@@ -7,219 +7,123 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg4e16_v_f16mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg4e16_v_f16mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg4e16_v_f16m1x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg4e16_v_f16m2x4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]]
 //
-void test_vlsseg4e16_v_f16m2_m(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_m(vbool8_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_i16mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_i16mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_i16m1x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_i16m2x4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_i16m2_m(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_u16mf4x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_u16mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_u16m1x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_u16m2x4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]]
 //
-void test_vlsseg4e16_v_u16m2_m(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c
index 5b61ce9f0779..8b3bf4c41f17 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e32.c
@@ -7,165 +7,93 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg4e32_v_f32mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
 //
-void test_vlsseg4e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg4e32_v_f32m1x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg4.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]]
 //
-void test_vlsseg4e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg4e32_v_f32m2x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg4.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]]
 //
-void test_vlsseg4e32_v_f32m2_m(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_m(vbool16_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg4e32_v_i32mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
 //
-void test_vlsseg4e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg4e32_v_i32m1x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
 //
-void test_vlsseg4e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_i32m2x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
 //
-void test_vlsseg4e32_v_i32m2_m(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg4e32_v_u32mf2x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]]
 //
-void test_vlsseg4e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg4e32_v_u32m1x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
 //
-void test_vlsseg4e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_u32m2x4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]]
 //
-void test_vlsseg4e32_v_u32m2_m(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c
index b4c7b0751c54..090d5024b3cc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e64.c
@@ -7,111 +7,63 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg4e64_v_f64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg4.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg4e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg4e64_v_f64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg4.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]]
 //
-void test_vlsseg4e64_v_f64m2_m(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_m(vbool32_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg4e64_v_i64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg4.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg4e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg4e64_v_i64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]]
 //
-void test_vlsseg4e64_v_i64m2_m(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg4e64_v_u64m1x4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg4.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg4e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64(mask, base, bstride, vl);
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e64_v_u64m2_m(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64(v0, v1, v2, v3, mask, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c index 71f5dd2b25f5..01fd6866bb21 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg4e8.c @@ -6,183 +6,103 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, 
vint8mf8_t *v3, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vint8mf2x4_t 
test_vlsseg4e8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_i8m2_m(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg4e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , } [[TMP0]] // -void test_vlsseg4e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_m 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , } [[TMP0]]
 //
-void test_vlsseg4e8_v_u8m2_m(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8(v0, v1, v2, v3, mask, base, bstride, vl);
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c
index 04b14f73dd73..38c8dd05e491 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e16.c
@@ -7,183 +7,93 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint16m1x5_t test_vlsseg5e16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e16(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e16(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c
index 6c7047b8a8ba..ce92596934fd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e32.c
@@ -7,123 +7,63 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint32m1x5_t test_vlsseg5e32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c
index f9ed4171ff9a..29f5eef1520a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e64.c
@@ -7,63 +7,33 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint64m1x5_t test_vlsseg5e64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e64(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e64(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c
index 6855f25dccef..026c04e27e57 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg5e8.c
@@ -6,163 +6,83 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP0]]
 //
-void test_vlsseg5e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vint8m1x5_t test_vlsseg5e8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e8(mask, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg5e8_v_u8mf8x5_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg5e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg5e8_v_u8mf4x5_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg5.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg5e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg5e8_v_u8mf2x5_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg5.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg5e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg5e8_v_u8m1x5_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg5.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg5e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8(v0, v1, v2, v3, v4, mask, base, bstride, vl);
+vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8(mask, base, bstride, vl);
 }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c
index 838f66a4eb23..eed25c65cd0a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e16.c
@@ -7,201 +7,93 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg6e16_v_f16mf4x6_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg6.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg6e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg6e16_v_f16mf2x6_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg6.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
+//
CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf4_m(vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr 
[[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c index 0f39d063ebbf..2aa3254194ee 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e32.c @@ -7,135 +7,63 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_m(vbool64_t mask, 
const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( poison, 
poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32(mask, base, bstride, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c index df4d82b78d5f..957e4f32bcdc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e64.c @@ -7,69 +7,33 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64(mask, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c index 3496c5351ddb..a18db969c38b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg6e8.c @@ -6,179 +6,83 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP0]] // -void test_vlsseg6e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl); +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg6e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_i8mf2x6_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg6e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8(mask, base, bstride, vl);
 }

-void test_vlsseg6e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8(mask, base, bstride, vl);
 }

-void test_vlsseg6e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8(mask, base, bstride, vl);
 }

-void test_vlsseg6e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8(mask, base, bstride, vl);
 }

-void test_vlsseg6e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8(mask, base, bstride, vl);
 }

-void test_vlsseg6e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, base, bstride, vl);
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8(mask, base, bstride, vl);
 }
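For callers, the change replaces six output pointers with a single tuple value. A minimal sketch of the new flow (the helper name and output buffers are illustrative only, and it assumes the segment-type vget intrinsics added earlier in this patch series):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Masked strided 6-field segment load; fields 0 and 1 are then pulled out
// of the vint8m1x6_t tuple with vget instead of being written through
// pointer out-parameters by the intrinsic itself.
void first_two_fields(const int8_t *base, ptrdiff_t rec_bytes, vbool8_t mask,
                      int8_t *out0, int8_t *out1, size_t vl) {
  vint8m1x6_t rec = __riscv_vlsseg6e8(mask, base, rec_bytes, vl);
  vint8m1_t f0 = __riscv_vget_v_i8m1x6_i8m1(rec, 0);
  vint8m1_t f1 = __riscv_vget_v_i8m1x6_i8m1(rec, 1);
  __riscv_vse8(out0, f0, vl);
  __riscv_vse8(out1, f1, vl);
}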
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c
index 943a831b3e48..fa98d8768dc4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e16.c
@@ -7,219 +7,93 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg7e16_v_f16mf4x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg7.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg7e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16(mask, base, bstride, vl);
 }

-void test_vlsseg7e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16(mask, base, bstride, vl);
 }

-void test_vlsseg7e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16(mask, base, bstride, vl);
 }

-void test_vlsseg7e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16(mask, base, bstride, vl);
 }

-void test_vlsseg7e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16(mask, base, bstride, vl);
 }

-void test_vlsseg7e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16(mask, base, bstride, vl);
 }

-void test_vlsseg7e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16(mask, base, bstride, vl);
 }

-void test_vlsseg7e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16(mask, base, bstride, vl);
 }

-void test_vlsseg7e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16(mask, base, bstride, vl);
 }
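The same pattern applies at every element width. As a sketch, a strip-mined loop over a strided stream of 7-field _Float16 records (the loop, the all-true mask, and the field index are illustrative; note bstride is a byte stride, so the chunk pointer advances by vl * bstride bytes):

#include <riscv_vector.h>
#include <stddef.h>

// Copy field 3 of every record into a packed array, one vl-sized chunk at a time.
void gather_field3(const _Float16 *base, ptrdiff_t bstride, size_t n, _Float16 *dst) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    vbool16_t mask = __riscv_vmset_m_b16(vl);  // all lanes active
    const _Float16 *p = (const _Float16 *)((const char *)base + (ptrdiff_t)i * bstride);
    vfloat16m1x7_t rec = __riscv_vlsseg7e16(mask, p, bstride, vl);
    __riscv_vse16(dst + i, __riscv_vget_v_f16m1x7_f16m1(rec, 3), vl);
    i += vl;
  }
}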
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c
index 2a7f874f57f1..c432a18d5847 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e32.c
@@ -7,147 +7,63 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg7e32_v_f32mf2x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg7.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
 //
-void test_vlsseg7e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32(mask, base, bstride, vl);
 }

-void test_vlsseg7e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32(mask, base, bstride, vl);
 }

-void test_vlsseg7e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32(mask, base, bstride, vl);
 }

-void test_vlsseg7e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32(mask, base, bstride, vl);
 }

-void test_vlsseg7e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32(mask, base, bstride, vl);
 }

-void test_vlsseg7e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32(mask, base, bstride, vl);
 }
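Because the stride argument is in bytes, the tuple form maps cleanly onto array-of-structs layouts. A sketch (the Body7 struct is hypothetical; only the __riscv_vlsseg7e32 call shape comes from the tests above):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Seven contiguous floats per record, plus a tag the load skips over;
// sizeof(Body7) is the byte stride between consecutive records.
typedef struct { float f[7]; int32_t tag; } Body7;

vfloat32m1x7_t load_records(const Body7 *a, vbool32_t mask, size_t vl) {
  return __riscv_vlsseg7e32(mask, &a[0].f[0], (ptrdiff_t)sizeof(Body7), vl);
}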
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c
index 1c31f2b2954f..6d20dc5db82e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e64.c
@@ -7,75 +7,33 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg7e64_v_f64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg7.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg7e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64(mask, base, bstride, vl);
 }

-void test_vlsseg7e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64(mask, base, bstride, vl);
 }

-void test_vlsseg7e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e64(mask, base, bstride, vl);
 }
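As the e64 tests show, the overloaded spelling stays the same and the result type now carries the segment count; resolution is driven by the element pointer and mask types. A sketch using only call shapes exercised above:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// One overloaded name, two tuple result types, selected by the pointer type.
vint64m1x7_t load_i64(const int64_t *p, ptrdiff_t s, vbool64_t m, size_t vl) {
  return __riscv_vlsseg7e64(m, p, s, vl);
}
vuint64m1x7_t load_u64(const uint64_t *p, ptrdiff_t s, vbool64_t m, size_t vl) {
  return __riscv_vlsseg7e64(m, p, s, vl);
}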
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c
index a94f1a6bb460..03745f733089 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg7e8.c
@@ -6,195 +6,83 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg7e8_v_i8mf8x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg7e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8(mask, base, bstride, vl);
 }

-void test_vlsseg7e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8(mask, base, bstride, vl);
 }

-void test_vlsseg7e8_v_i8mf2_m(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8(mask, base, bstride, vl);
 }

-void test_vlsseg7e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8(mask, base, bstride, vl);
 }

-void test_vlsseg7e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8(mask, base, bstride, vl);
 }

-void test_vlsseg7e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg7e8_v_u8mf2x7_m
+// CHECK-RV64-SAME: (
[[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP0]] // -void test_vlsseg7e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
<vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg7e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, base, bstride, vl);
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8(mask, base, bstride, vl);
 }
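The hunks above capture the interface change for the masked strided 7-field segment loads: the seven output pointers are gone, and the intrinsic returns one tuple value instead. A minimal sketch of a migrated call site follows; it is illustrative only and not part of this patch, and it assumes the tuple-extraction intrinsic __riscv_vget_v_i8mf2x7_i8mf2 defined by the RVV intrinsics specification:

#include <riscv_vector.h>

// Load seven strided segment fields under a mask, then pull out field 0.
vint8mf2_t load_field0(vbool16_t mask, const int8_t *base,
                       ptrdiff_t bstride, size_t vl) {
  // One tuple result replaces the seven out-pointers of the old interface.
  vint8mf2x7_t tuple = __riscv_vlsseg7e8(mask, base, bstride, vl);
  return __riscv_vget_v_i8mf2x7_i8mf2(tuple, 0); // field index 0 of 7
}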
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c
index 4898927a1c3b..7d0545a48926 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e16.c
@@ -7,237 +7,93 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg8e16_v_f16mf4x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg8.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]]
 //
-void test_vlsseg8e16_v_f16mf4_m(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_m(vbool64_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg8e16_v_f16mf2x8_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg8.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]]
 //
-void test_vlsseg8e16_v_f16mf2_m(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_m(vbool32_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg8e16_v_f16m1x8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg8.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_f16m1_m(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_m(vbool16_t mask, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } 
[[TMP0]] // -void test_vlsseg8e16_v_i16mf4_m(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16mf2_m(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_i16m1_m(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf4_m(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e16_v_u16mf2_m(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg8e16_v_u16m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg8e16_v_u16m1x8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]]
 //
-void test_vlsseg8e16_v_u16m1_m(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16(mask, base, bstride, vl);
 }
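Since C cannot overload on return type, the overloaded masked form used throughout these tests is resolved from the argument types alone: the mask type fixes the SEW/LMUL ratio, and the pointee type of base fixes SEW, which together select the tuple type. A sketch of that resolution, illustrative only, with the tuple-extraction intrinsic name again assumed from the RVV intrinsics specification:

// vbool16_t (ratio 16) plus const _Float16 * (SEW 16) implies LMUL=1,
// so this overloaded call resolves to the vfloat16m1x8_t-returning variant.
vfloat16m1_t third_field(vbool16_t mask, const _Float16 *base,
                         ptrdiff_t bstride, size_t vl) {
  vfloat16m1x8_t tuple = __riscv_vlsseg8e16(mask, base, bstride, vl);
  return __riscv_vget_v_f16m1x8_f16m1(tuple, 2); // field index 2 of 8
}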
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c
index c1a793fdcb4b..d559b42ca330 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e32.c
@@ -7,159 +7,63 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg8e32_v_f32mf2x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg8.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]]
 //
-void test_vlsseg8e32_v_f32mf2_m(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_m(vbool64_t mask, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg8e32_v_f32m1x8_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg8.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-//
CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_f32m1_m(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_m(vbool32_t mask, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32mf2_m(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr 
noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_i32m1_m(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32mf2_m(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e32_v_u32m1_m(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, 
vuint32m1_t *v7, vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c
index 68f16c3d0c60..14cde6db7d98 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e64.c
@@ -7,81 +7,33 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg8e64_v_f64m1x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg8.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]]
 //
-void test_vlsseg8e64_v_f64m1_m(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_m(vbool64_t mask, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg8e64_v_i64m1x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg8.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg8e64_v_i64m1_m(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint64m1x8_t test_vlsseg8e64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg8e64_v_u64m1x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg8.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]]
 //
-void test_vlsseg8e64_v_u64m1_m(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64(mask, base, bstride, vl);
 }
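For reference, the memory pattern the vlsseg8e8 tests below exercise: element j of the result gathers eight consecutive bytes starting at base + j * bstride, with bstride counted in bytes. A scalar model follows; it is illustrative only (masked-off elements are simply skipped here, whereas the real intrinsics' masked-off values follow the tail/mask policy):

#include <stddef.h>
#include <stdint.h>

// Scalar model of a masked vlsseg8e8: field i of element j lands in
// out[i][j], read from byte i of the segment at base + j * bstride.
void vlsseg8e8_ref(size_t vl, const _Bool mask[vl],
                   const int8_t *base, ptrdiff_t bstride,
                   int8_t out[8][vl]) {
  for (size_t j = 0; j < vl; ++j) {
    if (!mask[j])
      continue; // masked-off element: left untouched in this model
    const int8_t *seg = base + j * bstride; // 1-byte elements, so bytes == elements
    for (size_t i = 0; i < 8; ++i)
      out[i][j] = seg[i];
  }
}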
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c
index e2a020046164..2c835c652623 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlsseg8e8.c
@@ -6,211 +6,83 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg8e8_v_i8mf8x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8mf8_m(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_i8mf4x8_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg8e8_v_i8mf4_m(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_i8mf2x8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>,
, , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_i8m1_m(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8(mask, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_m -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( poison, poison, poison, poison, poison, poison, poison, poison, ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 3) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP0]] // -void test_vlsseg8e8_v_u8mf8_m(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8(mask, base, bstride, vl); } -// 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_u8mf4x8_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8mf4_m(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_u8mf2x8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8mf2_m(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8(mask, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_m
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg8e8_v_u8m1x8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]]
 //
-void test_vlsseg8e8_v_u8m1_m(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, base, bstride, vl);
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8(mask, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c
index 9e8e40e41615..0c2b8e1e78a9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e16.c
@@ -7,843 +7,843 @@

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.nxv1f16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf4x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf2x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.nxv8f16.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m2x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x half>, <vscale x 16 x half> } @test_vlsseg2e16_v_f16m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0]], <vscale x 16 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } poison, <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.nxv16f16.i64(<vscale x 16 x half> [[TMP2]], <vscale x 16 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m4x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_i16mf4x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf4x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_i16mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf2x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_i16m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m1x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_i16m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m2x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_i16m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0]], <vscale x 16 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m4x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_u16mf4x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf4x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_u16mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.nxv2i16.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf2x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg2e16_v_u16m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.nxv4i16.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m1x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg2e16_v_u16m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.nxv8i16.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m2x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i16>, <vscale x 16 x i16> } @test_vlsseg2e16_v_u16m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0]], <vscale x 16 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m4x2_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x half>, <vscale x 16 x half> } @test_vlsseg2e16_v_f16m4x2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0]], <vscale x 16 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } poison, <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[TMP2]], <vscale x 16 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg2e16_v_i16mf4x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]]
 //
-void test_vlsseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg2e16_v_i16mf2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); 
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + 
return __riscv_vlsseg2e16_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } 
[[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg2e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t 
maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: 
ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2_mu +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_f16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_f16m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg2e16_v_i16m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call 
{ , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_i16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_i16m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_mu(vuint16m4_t *v0, 
vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_v_u16m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_v_u16m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c index cce5918ebcc8..34527c1889b1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32.c @@ -7,675 +7,675 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, 
const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 
[[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, 
size_t vl) { - return __riscv_vlsseg2e32_v_f32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, 
const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg2e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t 
maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tumu +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, 
bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_mu(mask, 
maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } 
@llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t 
mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32_tuple.c deleted file mode 100644 index cef14f42f139..000000000000 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e32_tuple.c +++ /dev/null @@ -1,681 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ -// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ -// RUN: FileCheck --check-prefix=CHECK-RV64 %s - -#include <riscv_vector.h> - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -//
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t 
test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m4x2_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// 
-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tum -// CHECK-RV64-SAME: 
( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue 
{ , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define 
dso_local { , } @test_vlsseg2e32_v_f32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } 
poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } 
@llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg2e32_v_tuple_u32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_f32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } 
[[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_i32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_tuple_u32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t 
bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_tuple_u32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_u32m2x2_mu
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP4]]
-//
-vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_tuple_u32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_u32m4x2_mu
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP4]]
-//
-vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_tuple_u32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c
index e261ab667af9..83995551b497 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e64.c
@@ -7,507 +7,507 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg2e64_v_f64m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 
8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tu -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_i64m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m2x2_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_i64m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m4x2_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_u64m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.nxv1i64.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m1x2_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg2e64_v_u64m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m2x2_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m4_tu(vuint64m4_t *v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m4x2_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg2e64_v_f64m1x2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP2]], <vscale x 1 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_f64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_f64m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg2e64_v_f64m2x2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0]], <vscale x 2 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_f64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_v_f64m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, 
size_t vl) { - return __riscv_vlsseg2e64_v_i64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 
0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg2e64_v_i64m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t 
maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue 
{ , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t 
*base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c index cfc48d3e308e..9bf97d31a5a1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg2e8.c @@ -1,680 +1,681 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf8x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg2e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { 
- return __riscv_vlsseg2e8_v_i8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// 
CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlsseg2.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m1_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m1x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m2_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m2x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m4_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m4x2_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t 
test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], 
ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void 
test_vlsseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m1_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m2_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tum +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m4_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t 
test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tumu -// CHECK-RV64-SAME: 
(ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } 
[[TMP4]] // -void test_vlsseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m1_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m2_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } 
@test_vlsseg2e8_v_u8m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m4_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf8_mu(v0, v1, mask, maskedoff0, 
maskedoff1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_i8m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void 
test_vlsseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m1_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m2_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_v_u8m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
<vscale x 32 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 32 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0]], <vscale x 32 x i8> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } poison, <vscale x 32 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], <vscale x 32 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m4_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c
index b983dde860fc..909008b50ed4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e16.c
@@ -7,771 +7,771 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg3e16_v_f16mf4x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+//
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf4x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg3e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t 
test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf4x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, 
vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf4x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_tum +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t 
test_vlsseg3e16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], 
[[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, 
vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t 
vl) { + return __riscv_vlsseg3e16_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], 
[[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret 
void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg3e16_v_u16mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , 
} [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vlsseg3e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } 
[[TMP6]] // -void test_vlsseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_f16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_f16m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e16_v_i16mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_i16m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_i16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t mask, 
vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_i16m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], 
i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_v_u16mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_u16m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
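For readers scanning the regenerated checks: every test in these files changes the same way. The segment fields no longer come back through one output pointer each, and the three maskedoff operands travel as a single tuple value. A minimal call-site sketch, not part of the patch; it assumes the tuple accessor __riscv_vget_v_u16m1x3_u16m1 introduced alongside the tuple types in this patch series:

#include <riscv_vector.h>

/* Before this patch: one output pointer per segment field and three
   separate maskedoff operands. */
void load3_before(vbool16_t mask, vuint16m1_t mo0, vuint16m1_t mo1, vuint16m1_t mo2,
                  const uint16_t *base, ptrdiff_t bstride, size_t vl,
                  vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2) {
  __riscv_vlsseg3e16_v_u16m1_mu(v0, v1, v2, mask, mo0, mo1, mo2, base, bstride, vl);
}

/* After this patch: the maskedoff operands are one vuint16m1x3_t and the
   result is returned as the same tuple type; fields are read back with vget. */
vuint16m1_t load3_after(vbool16_t mask, vuint16m1x3_t maskedoff, const uint16_t *base,
                        ptrdiff_t bstride, size_t vl) {
  vuint16m1x3_t vt = __riscv_vlsseg3e16_v_u16m1x3_mu(mask, maskedoff, base, bstride, vl);
  return __riscv_vget_v_u16m1x3_u16m1(vt, 0); /* assumed accessor; index must be a constant */
}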
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c
index 8f493beef435..84fd6c601995 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e32.c
@@ -7,579 +7,579 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg3e32_v_f32mf2x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.nxv1f32.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]]
 //
-void test_vlsseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32mf2x3_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg3e32_v_f32m1x3_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg3.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], <vscale x 2 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> }
[[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg3.nxv2f32.i64(<vscale x 2 x float> [[TMP3]], <vscale x 2 x float> [[TMP4]], <vscale x 2 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP6]]
 //
-void test_vlsseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m1x3_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg3e32_v_f32m2x3_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg3.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0]], <vscale x 4 x float> [[MASKEDOFF1]], <vscale x 4 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg3.nxv4f32.i64(<vscale x 4 x float> [[TMP3]], <vscale x 4 x float> [[TMP4]], <vscale x 4 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP6]]
 //
-void test_vlsseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t
*v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_f32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_f32m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_i32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_i32mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.nxv2i32.i64(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]]
 //
-void test_vlsseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m1x3_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_i32m2x3_tu
 //
-void test_vlsseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m2x3_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg3e32_v_u32mf2x3_tu
 //
-void test_vlsseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32mf2x3_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg3e32_v_u32m1x3_tu
 //
-void test_vlsseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m1x3_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_u32m2x3_tu
 //
-void test_vlsseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m2x3_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg3e32_v_f32mf2x3_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]]
 //
-void test_vlsseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
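The hunks in this file all repeat the same interface change, so a compact caller sketch may help; it is illustrative only and not part of the patch (the wrapper names load3_old and load3_new are hypothetical, and it assumes a toolchain with the V extension and riscv_vector.h):

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Before this patch: the tail-undisturbed (_tu) strided segment load
       returned its three segment fields through output pointers. */
    static void load3_old(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
                          vint32m1_t m0, vint32m1_t m1, vint32m1_t m2,
                          const int32_t *base, ptrdiff_t bstride, size_t vl) {
      __riscv_vlsseg3e32_v_i32m1_tu(v0, v1, v2, m0, m1, m2, base, bstride, vl);
    }

    /* After this patch: the maskedoff operand and the result are one
       vint32m1x3_t tuple, so the intrinsic is value-in/value-out. */
    static vint32m1x3_t load3_new(vint32m1x3_t maskedoff, const int32_t *base,
                                  ptrdiff_t bstride, size_t vl) {
      return __riscv_vlsseg3e32_v_i32m1x3_tu(maskedoff, base, bstride, vl);
    }
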
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg3e32_v_f32m1x3_tum
 //
-void test_vlsseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg3e32_v_f32m2x3_tum
 //
-void test_vlsseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg3e32_v_i32mf2x3_tum
 //
-void test_vlsseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg3e32_v_i32m1x3_tum
 //
-void test_vlsseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_i32m2x3_tum
 //
-void test_vlsseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg3e32_v_u32mf2x3_tum
 //
-void test_vlsseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg3e32_v_u32m1x3_tum
 //
-void test_vlsseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_u32m2x3_tum
 //
-void test_vlsseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg3e32_v_f32mf2x3_tumu
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
 //
-void test_vlsseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg3e32_v_f32m1x3_tumu
 //
-void test_vlsseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg3e32_v_f32m2x3_tumu
 //
-void test_vlsseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg3e32_v_i32mf2x3_tumu
 //
-void test_vlsseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg3e32_v_i32m1x3_tumu
 //
-void test_vlsseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_i32m2x3_tumu
 //
-void test_vlsseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg3e32_v_u32mf2x3_tumu
 //
-void test_vlsseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg3e32_v_u32m1x3_tumu
 //
-void test_vlsseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_u32m2x3_tumu
 //
-void test_vlsseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg3e32_v_f32mf2x3_mu
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
 //
-void test_vlsseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg3e32_v_f32m1x3_mu
 //
-void test_vlsseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg3e32_v_f32m2x3_mu
 //
-void test_vlsseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg3e32_v_i32mf2x3_mu
 //
-void test_vlsseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg3e32_v_i32m1x3_mu
 //
-void test_vlsseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_i32m2x3_mu
 //
-void test_vlsseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg3e32_v_u32mf2x3_mu
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]],
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, 
const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_v_u32m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_v_u32m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c index 3a7b84fe6a95..a6d494f8094e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e64.c @@ -7,387 +7,387 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_tu +// 
CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 
2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m2x3_tu(maskedoff_tuple, base, 
bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e64_v_u64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_u64m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_u64m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t 
test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_f64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_v_i64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , 
} [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg3e64_v_u64m1x3_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_u64m2x3_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], <vscale x 2 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg3e64_v_f64m1x3_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg3.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg3.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP3]], <vscale x 1 x double> [[TMP4]], <vscale x 1 x double> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]]
 //
-void test_vlsseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg3e64_v_f64m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg3.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0]], <vscale x 2 x double> [[MASKEDOFF1]], <vscale x 2 x double> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg3.mask.nxv2f64.i64(<vscale x 2 x double> [[TMP3]], <vscale x 2 x double> [[TMP4]], <vscale x 2 x double> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP6]]
 //
-void test_vlsseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg3e64_v_i64m1x3_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_i64m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], <vscale x 2 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg3e64_v_u64m1x3_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg3.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_u64m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], <vscale x 2 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e64_v_u64m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
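For readers skimming the autogenerated churn above, the user-visible change is that each strided segment load now returns a single tuple value instead of writing every segment field through an out-pointer. A minimal usage sketch follows (illustrative only, not part of this patch; the tuple accessor __riscv_vget_v_i64m2x3_i64m2 is assumed to come from the companion vget/vset changes in this patch series):

#include <riscv_vector.h>

// Load three interleaved i64 fields with a byte stride, tail-undisturbed
// mask-undisturbed (tumu), then combine two of the segment fields.
vint64m2_t sum_first_and_last(vbool32_t mask, vint64m2x3_t maskedoff_tuple,
                              const int64_t *base, ptrdiff_t bstride,
                              size_t vl) {
  // One call yields all three segment fields as a tuple value.
  vint64m2x3_t seg = __riscv_vlsseg3e64_v_i64m2x3_tumu(mask, maskedoff_tuple,
                                                       base, bstride, vl);
  vint64m2_t s0 = __riscv_vget_v_i64m2x3_i64m2(seg, 0);  // assumed accessor
  vint64m2_t s2 = __riscv_vget_v_i64m2x3_i64m2(seg, 2);  // assumed accessor
  return __riscv_vadd_vv_i64m2(s0, s2, vl);
}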
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c
index 769224db7d70..f8a6ae28dd83 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg3e8.c
@@ -1,648 +1,649 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_i8mf8x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf8x3_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_i8mf4x3_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.nxv2i8.i64(<vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf4x3_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_i8mf2x3_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg3e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// 
CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf4_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t 
*base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg3e8_v_u8m2_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m2x3_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg3e8_v_u8mf4_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } 
[[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m2_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m2x3_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr 
noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t 
*base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf4_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg3e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m2_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, 
vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_i8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_i8m2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg3e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf4_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
*v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8mf2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_v_u8m1_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_v_u8m1x3_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m2_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c
index 9afccf0a810e..502fb539dc2f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e16.c
@@ -7,867 +7,867 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], <vscale x 8 x half> [[MASKEDOFF2]], <vscale x 8 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], <vscale x 8 x half> [[MASKEDOFF2]], <vscale x 8 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], <vscale x 8 x half> [[MASKEDOFF2]], <vscale x 8 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], <vscale x 8 x half> [[MASKEDOFF2]], <vscale x 8 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>,
<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg4e16_v_f16mf4x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.nxv1f16.i64(<vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP8]]
+//
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf4x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg4e16_v_f16mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.nxv2f16.i64(<vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], <vscale x 2 x half> [[TMP6]], <vscale x 2 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP8]]
+//
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg4e16_v_f16m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.nxv4f16.i64(<vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], <vscale x 4 x half> [[TMP6]], <vscale x 4 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP8]]
+//
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16m1x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
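The hunks above show the shape of the migration: the old interface returned the four segment fields through output pointers (v0..v3) and took four separate maskedoff operands, while the new interface takes and returns a single tuple value such as vfloat16mf4x4_t. A minimal caller-side sketch of the new tail-undisturbed form; the helper name load_fields_tu and its arguments are illustrative only, while the intrinsic is the one exercised by the tests above:

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Tail-undisturbed strided segment load: `prev` supplies the tail
       elements, playing the role of the maskedoff_tuple operand in the
       tests above. Illustrative helper, not part of the patch; requires
       a toolchain with the tuple-type segment-load intrinsics. */
    vint16m1x4_t load_fields_tu(vint16m1x4_t prev, const int16_t *base,
                                ptrdiff_t bstride, size_t vl) {
      /* One call yields all four fields as a tuple; no output pointers. */
      return __riscv_vlsseg4e16_v_i16m1x4_tu(prev, base, bstride, vl);
    }

+// CHECK-RV64-LABEL: define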
dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf4x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m1x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t 
test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf4x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m1x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
{ , , , } @test_vlsseg4e16_v_u16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_u16m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , 
, , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
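Across the policy variants, the only change in the generated IR is the trailing policy operand on the masked intrinsic: i64 1 for the old _mu forms, i64 2 for _tum, and i64 0 for _tumu (bit 0 set means tail agnostic, bit 1 set means mask agnostic, so 0 preserves both the tail and the inactive lanes). A caller-side sketch of the _tumu form; the helper name blend_fields and its arguments are illustrative only:

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Mask- and tail-undisturbed strided segment load: inactive and tail
       lanes keep their values from `prev`, matching the i64 0 policy
       operand in the checks above. Illustrative helper, not part of the
       patch. */
    vuint16m1x4_t blend_fields(vbool16_t mask, vuint16m1x4_t prev,
                               const uint16_t *base, ptrdiff_t bstride,
                               size_t vl) {
      return __riscv_vlsseg4e16_v_u16m1x4_tumu(mask, prev, base, bstride, vl);
    }

+// 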
CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_i16mf2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP8]]
+//
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_i16m1x4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP8]]
+//
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_i16m2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], <vscale x 8 x i16> [[TMP6]], <vscale x 8 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP8]]
+//
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_u16mf4x4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP8]]
+//
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_u16mf2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP8]]
+//
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_u16m1x4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP8]]
+//
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_u16m2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], <vscale x 8 x i16> [[TMP6]], <vscale x 8 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP8]]
+//
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg4e16_v_f16mf4x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP8]]
+//
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg4e16_v_f16mf2x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], <vscale x 2 x half> [[TMP6]], <vscale x 2 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP8]]
+//
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg4e16_v_f16m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], <vscale x 4 x half> [[TMP6]], <vscale x 4 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP8]]
+//
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg4e16_v_f16m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], <vscale x 8 x half> [[TMP6]], <vscale x 8 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP8]]
+//
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_i16mf4x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP8]]
+//
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_i16mf2x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP8]]
+//
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_i16m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP8]]
+//
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
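[Editor's note: a minimal caller-side sketch of the tuple interface these checks exercise; the __riscv_vget accessor is assumed from the companion tuple-type intrinsics of this patch series, not something this hunk adds.]

#include <riscv_vector.h>

// One strided segment load returns all four fields as a single
// vint16m1x4_t tuple (here with the tail/mask-undisturbed "tumu" policy);
// vget then pulls an individual field back out.
static vint16m1_t first_field(vbool16_t mask, vint16m1x4_t maskedoff,
                              const int16_t *base, ptrdiff_t bstride, size_t vl) {
  vint16m1x4_t v = __riscv_vlsseg4e16_v_i16m1x4_tumu(mask, maskedoff, base, bstride, vl);
  return __riscv_vget_v_i16m1x4_i16m1(v, 0); // field 0 of the tuple
}
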
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_i16m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], <vscale x 8 x i16> [[TMP6]], <vscale x 8 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP8]]
+//
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg4e16_v_u16mf4x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP8]]
+//
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg4e16_v_u16mf2x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP8]]
+//
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg4e16_v_u16m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP8]]
+//
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg4e16_v_u16m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], <vscale x 8 x i16> [[TMP6]], <vscale x 8 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP8]]
+//
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c
index bb70f5594079..50102f7493bc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e32.c
@@ -7,651 +7,651 @@
 #include <riscv_vector.h>
 
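[Editor's note: the hunks below delete the old-style vlsseg4e32 checks, in which each loaded segment was written out through a separate pointer parameter; the tuple-returning replacements follow the same shape as the vlsseg4e16 checks above. A rough before/after sketch for the m1 case -- the x4 tuple spelling is inferred by analogy with vlsseg4e16, since this part of the diff only shows the deletions:]

#include <riscv_vector.h>

// Old interface (being removed): four output pointers, returns void.
static void load4_old(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3,
                      vint32m1_t m0, vint32m1_t m1, vint32m1_t m2, vint32m1_t m3,
                      const int32_t *base, ptrdiff_t bstride, size_t vl) {
  __riscv_vlsseg4e32_v_i32m1_tu(v0, v1, v2, v3, m0, m1, m2, m3, base, bstride, vl);
}

// New interface: one tuple in (the maskedoff value), one tuple out.
static vint32m1x4_t load4_new(vint32m1x4_t maskedoff, const int32_t *base,
                              ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg4e32_v_i32m1x4_tu(maskedoff, base, bstride, vl);
}
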
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg4.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], <vscale x 2 x float> [[MASKEDOFF2]], <vscale x 2 x float> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg4.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0]], <vscale x 4 x float> [[MASKEDOFF1]], <vscale x 4 x float> [[MASKEDOFF2]], <vscale x 4 x float> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], <vscale x 1 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], <vscale x 2 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], <vscale x 4 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], <vscale x 1 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], <vscale x 2 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], <vscale x 4 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg4.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], <vscale x 2 x float> [[MASKEDOFF2]], <vscale x 2 x float> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg4.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0]], <vscale x 4 x float> [[MASKEDOFF1]], <vscale x 4 x float> [[MASKEDOFF2]], <vscale x 4 x float> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], <vscale x 1 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], <vscale x 2 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], <vscale x 4 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], <vscale x 1 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
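[Editor's note: across these checks the trailing immediate on the masked IR intrinsic tracks the policy suffix: _tumu calls end in i64 0, _mu in i64 1, _tum in i64 2, and the unmasked _tu form carries no policy operand at all. That pattern is consistent with bit 0 meaning tail-agnostic and bit 1 mask-agnostic. The names below are hypothetical, for illustration only; the tests themselves encode the values as raw immediates:]

enum {
  /* inferred from the checks: bit 0 = tail agnostic, bit 1 = mask agnostic */
  POLICY_TUMU = 0, /* _tumu: tail and mask undisturbed            */
  POLICY_MU   = 1, /* _mu:   tail agnostic, mask undisturbed      */
  POLICY_TUM  = 2, /* _tum:  tail undisturbed, mask agnostic      */
  POLICY_TAMA = 3  /* both agnostic (no suffix in these tests)    */
};
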
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], <vscale x 2 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], <vscale x 4 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg4.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], <vscale x 2 x float> [[MASKEDOFF2]], <vscale x 2 x float> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg4.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0]], <vscale x 4 x float> [[MASKEDOFF1]], <vscale x 4 x float> [[MASKEDOFF2]], <vscale x 4 x float> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], <vscale x 1 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], <vscale x 2 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], <vscale x 4 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], <vscale x 1 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], <vscale x 2 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], <vscale x 4 x i32> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t
bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_f32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_i32m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m2_mu(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg4e32_v_f32mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.nxv1f32.i64(<vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], <vscale x 1 x float> [[TMP6]], <vscale x 1 x float> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP8]]
+//
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg4e32_v_f32m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg4.nxv2f32.i64(<vscale x 2 x float> [[TMP4]], <vscale x 2 x float> [[TMP5]], <vscale x 2 x float> [[TMP6]], <vscale x 2 x float> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP8]]
+//
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m1x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg4e32_v_f32m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:
[[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m1x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m1x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] 
= extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , 
, } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , 
} [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); 
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_u32m2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg4.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], <vscale x 4 x i32> [[TMP6]], <vscale x 4 x i32> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP8]]
+//
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg4e32_v_f32mf2x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], <vscale x 1 x float> [[TMP6]], <vscale x 1 x float> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP8]]
+//
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg4e32_v_f32m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] =
insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_f32m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_i32m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , 
, } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_v_u32m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c index 61ef42459c62..674d1db3111a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e64.c @@ -7,435 +7,435 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m1x4_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, 
vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m2x4_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m1x4_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } 
@test_vlsseg4e64_v_i64m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m2x4_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m1x4_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m2x4_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr 
[[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, 
vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], 
[[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } 
[[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_f64m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_i64m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_v_u64m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c index d91fc5ce70e0..5e82cc6e56f9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg4e8.c @@ -1,728 +1,729 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature 
+experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg4e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], 
ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, 
vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, 
vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], 
<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], <vscale x 16 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_u8m2_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_i8mf8x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf8x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_i8mf4x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+//
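// A caller-side sketch of the interface these tests exercise: one
// vint8mf8x4_t tuple operand replaces the four output pointers and four
// separate maskedoff operands of the old intrinsic shown above. The
// __riscv_vget_v_i8mf8x4_i8mf8 accessor used to pull a field back out is an
// assumption here, based on the tuple-type vget interface added elsewhere in
// this patch series; it is not checked by this file.
#include <riscv_vector.h>

vint8mf8_t first_field(vint8mf8x4_t tail, const int8_t *base,
                       ptrdiff_t bstride, size_t vl) {
  // Tail-undisturbed strided segment load: lanes at index >= vl keep the
  // values carried in by `tail`.
  vint8mf8x4_t v = __riscv_vlsseg4e8_v_i8mf8x4_tu(tail, base, bstride, vl);
  // Extract segment field 0 from the tuple (hypothetical accessor name).
  return __riscv_vget_v_i8mf8x4_i8mf8(v, 0);
}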
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg4e8_v_i8m1x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8m1x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8m2x4_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_i8mf8x4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_i8mf4x4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } 
[[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + 
return __riscv_vlsseg4e8_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } 
[[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_tumu(mask, 
+  return __riscv_vlsseg4e8_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_i8m1x4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_i8m2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_u8mf8x4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_u8mf4x4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_u8mf2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_u8m1x4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_i8mf8x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_i8mf4x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_i8mf2x4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]],
<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_i8m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_i8m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_u8mf8x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_u8mf4x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_u8mf2x4_mu
+//
CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_u8m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>,
<vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c
index 3987cfb8ea2d..fd3daf980e2a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e16.c
@@ -7,723 +7,723 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_f16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg5.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], <vscale x 2 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_f16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg5.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], <vscale x 4 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_f16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_i16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg5.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_i16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:
 entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_i16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_u16mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-//
 CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg5.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_u16mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_u16m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-//
 CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_f16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg5.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], <vscale x 2 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_f16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]],
<vscale x 4 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg5.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], <vscale x 4 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_f16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_i16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef
 [[V4:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg5.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_i16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_i16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]],
 ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_u16mf4_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg5.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_u16mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_u16m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return
__riscv_vlsseg5e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, 
vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, 
vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// 
CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_u16mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_v_u16m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg5e16_v_f16mf4x5_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.nxv1f16.i64(<vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP10]]
+//
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16_v_f16mf4x5_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg5e16_v_f16mf2x5_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg5.nxv2f16.i64(<vscale x 2 x half> [[TMP5]], <vscale x 2 x half> [[TMP6]], <vscale x 2 x half> [[TMP7]], <vscale x 2 x half> [[TMP8]], <vscale x 2 x half> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP10]]
+//
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16_v_f16mf2x5_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg5e16_v_f16m1x5_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg5.nxv4f16.i64(<vscale x 4 x half> [[TMP5]], <vscale x 4 x half> [[TMP6]], <vscale x 4 x half> [[TMP7]], <vscale x 4 x half> [[TMP8]], <vscale x 4 x half> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP10]]
+//
+vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16_v_f16m1x5_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg5e16_v_i16mf4x5_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.nxv1i16.i64(<vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP10]]
+//
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16_v_i16mf4x5_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg5e16_v_i16mf2x5_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue
{ , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , 
} [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , 
} [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { , , , , } [[TMP10]]
+//
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg5e16_v_i16mf2x5_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg5.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP10]]
+//
+vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... 21 identically shaped +blocks elided: the remaining _tum variants (i16m1x5, u16mf4x5, u16mf2x5, u16m1x5; policy operand i64 2), all nine f16/i16/u16 {mf4,mf2,m1}x5 _tumu variants (policy operand i64 0), and the _mu variants up to u16mf2x5 (policy operand i64 1), with element types nxv1i16/nxv2i16/nxv4i16 and nxv1f16/nxv2f16/nxv4f16 and mask types vbool64_t/vbool32_t/vbool16_t as above ...]
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg5e16_v_u16m1x5_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP10]]
+//
+vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e16_v_u16m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c
index 894a2f6d8b6d..ef99b985833a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e32.c
@@ -7,483 +7,483 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg5e32_v_f32mf2x5_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg5.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], <vscale x 1 x float> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg5.nxv1f32.i64(<vscale x 1 x float> [[TMP5]], <vscale x 1 x float> [[TMP6]], <vscale x 1 x float> [[TMP7]], <vscale x 1 x float> [[TMP8]], <vscale x 1 x float> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_v_f32mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_v_f32mf2x5_tu(maskedoff_tuple, base, bstride, vl);
 }
 
[... 7 analogous old-interface/new-interface pairs elided: f32m1x5_tu, i32mf2x5_tu, i32m1x5_tu, u32mf2x5_tu, u32m1x5_tu (element types nxv2f32, nxv1i32, nxv2i32), and the masked f32mf2x5_tum and f32m1x5_tum variants, whose @llvm.riscv.vlsseg5.mask intrinsic calls take an extra [[MASK]] operand and policy operand i64 2 ...]
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , ,
, , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
} [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , 
, } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg5.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP10]]
 //
-void test_vlsseg5e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_v_u32m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_v_u32m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c
index 3464ee62742a..e6a0fe371a39 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e64.c
@@ -7,243 +7,243 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg5e64_v_f64m1x5_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg5.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-//
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 
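The unmasked tail-undisturbed (_tu) variants in this file follow the same shape minus the mask operand; the tuple passthru supplies the elements past vl. A sketch under the same assumptions (wrapper name illustrative):

#include <riscv_vector.h>

// Tail-undisturbed (_tu) strided segment load: elements beyond vl keep
// their values from the maskedoff_tuple operand.
vfloat64m1x5_t load5_f64m1_tu(vfloat64m1x5_t maskedoff_tuple,
                              const double *base, ptrdiff_t bstride,
                              size_t vl) {
  return __riscv_vlsseg5e64_v_f64m1x5_tu(maskedoff_tuple, base, bstride, vl);
}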
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , 
, , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 
4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP10]]
 //
-void test_vlsseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_i64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_i64m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg5e64_v_u64m1x5_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP10]]
 //
-void test_vlsseg5e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_v_u64m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_v_u64m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c
index a493de706508..b4f06f8e9d96 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg5e8.c
@@ -1,648 +1,649 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-//
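The vlsseg5e8.c hunks below remove the pointer-based tests wholesale before their tuple replacements are added, so contrasting the two shapes once makes the remaining churn easy to skim. The old signature is taken verbatim from the removed test; the i8mf8x5 spelling of the replacement is an assumption extrapolated from the x5 renaming visible in the e32/e64 files above, and the wrapper names are illustrative:

#include <riscv_vector.h>

// Old (removed) shape: five out-pointers plus five separate maskedoff values.
void load5_i8mf8_tu_old(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
                        vint8mf8_t *v3, vint8mf8_t *v4,
                        vint8mf8_t m0, vint8mf8_t m1, vint8mf8_t m2,
                        vint8mf8_t m3, vint8mf8_t m4,
                        const int8_t *base, ptrdiff_t bstride, size_t vl) {
  __riscv_vlsseg5e8_v_i8mf8_tu(v0, v1, v2, v3, v4, m0, m1, m2, m3, m4,
                               base, bstride, vl);
}

// New shape (assumed name, following the pattern above): one tuple in,
// one tuple out.
vint8mf8x5_t load5_i8mf8_tu_new(vint8mf8x5_t maskedoff_tuple,
                                const int8_t *base, ptrdiff_t bstride,
                                size_t vl) {
  return __riscv_vlsseg5e8_v_i8mf8x5_tu(maskedoff_tuple, base, bstride, vl);
}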
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8mf2_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg5.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8m1_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_i8mf8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
[Removed lines continue in exactly this shape for the seven remaining _tum variants (i8mf4, i8mf2, i8m1, u8mf8, u8mf4, u8mf2, u8m1, over <vscale x 2 x i8>, <vscale x 4 x i8>, and <vscale x 8 x i8> with masks <vscale x 2 x i1>, <vscale x 4 x i1>, and <vscale x 8 x i1>), then for all eight _tumu and all eight _mu variants, which differ from _tum only in the final policy operand of @llvm.riscv.vlsseg5.mask: i64 0 for _tumu and i64 1 for _mu. The removed half of the hunk ends with:]
-void test_vlsseg5e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8m1_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
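
At the source level, the interface swap these regenerated checks encode is simple: the removed tests return the five segment fields through five pointer out-parameters, while the added tests pass and return one first-class tuple value. A minimal usage sketch of the new style follows; the vlsseg5e8 tuple intrinsic and the vuint8m1x5_t type are taken from this hunk, while the __riscv_vget_v_u8m1x5_u8m1 accessor is assumed from the tuple-type support introduced earlier in this patch series.

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

/* New tuple interface from this hunk: all five segment fields travel in one
   vuint8m1x5_t value instead of five pointer out-parameters. */
vuint8m1x5_t load_group_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base,
                           ptrdiff_t bstride, size_t vl) {
  /* Tail-undisturbed strided segment load: five 8-bit fields per segment,
     consecutive segments bstride bytes apart. */
  return __riscv_vlsseg5e8_v_u8m1x5_tu(maskedoff_tuple, base, bstride, vl);
}

/* Assumed accessor from the tuple-type patches: extract a single field. */
vuint8m1_t first_field(vuint8m1x5_t tuple) {
  return __riscv_vget_v_u8m1x5_u8m1(tuple, 0);
}
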
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg5e8_v_i8mf8x5_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.nxv1i8.i64(<vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP10]]
+//
+vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8_v_i8mf8x5_tu(maskedoff_tuple, base, bstride, vl);
+}
+
[Added lines continue with the same block for the i8mf4x5, i8mf2x5, i8m1x5, u8mf8x5, u8mf4x5, u8mf2x5, and u8m1x5 _tu tests, identical apart from the element type (<vscale x 2 x i8>, <vscale x 4 x i8>, <vscale x 8 x i8>, i.e. the nxv2i8/nxv4i8/nxv8i8 intrinsics) and the corresponding C tuple types. The masked _tum tests then begin:]
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg5e8_v_i8mf8x5_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP10]]
+//
+vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[The _tum blocks repeat for i8mf4x5 (vbool32_t, nxv2i8), i8mf2x5 (vbool16_t, nxv4i8), i8m1x5 (vbool8_t, nxv8i8), u8mf8x5 (vbool64_t, nxv1i8), and u8mf4x5 (vbool32_t, nxv2i8); the u8mf2x5 block then opens:]
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg5e8_v_u8mf2x5_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue 
{ , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , 
, , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c index b6ed001a53e9..6b1702848f93 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c @@ -7,795 +7,795 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// 
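For readers following the interface change, here is a minimal usage sketch (not part of the patch) of the tuple-type form these tests check. The load intrinsic and its policy suffix are taken from the tests above; `__riscv_vget_v_i8m1x5_i8m1` is assumed to be provided by the companion tuple vget/vset patches in this series.

#include <riscv_vector.h>

// _tum = tail undisturbed, mask agnostic: this is the trailing policy
// operand `i64 2` in the CHECK lines above (_tumu lowers to 0, _mu to 1).
vint8m1_t first_field_of_segments(vbool8_t mask, vint8m1x5_t maskedoff_tuple,
                                  const int8_t *base, ptrdiff_t bstride,
                                  size_t vl) {
  // One call now returns all five segment fields as a single tuple value,
  // instead of writing them out through five output pointers.
  vint8m1x5_t v =
      __riscv_vlsseg5e8_v_i8m1x5_tum(mask, maskedoff_tuple, base, bstride, vl);
  // Assumed companion intrinsic: extract field 0 of the 5-field tuple.
  return __riscv_vget_v_i8m1x5_i8m1(v, 0);
}
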
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c
index b6ed001a53e9..6b1702848f93 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e16.c
@@ -7,795 +7,795 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg6.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], <vscale x 1 x half> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg6.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], <vscale x 2 x half> [[MASKEDOFF4]], <vscale x 2 x half> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg6.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], <vscale x 4 x half> [[MASKEDOFF4]], <vscale x 4 x half> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg6.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], <vscale x 1 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg6.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], <vscale x 2 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg6.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], <vscale x 4 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg6.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], <vscale x 1 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg6.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], <vscale x 2 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg6.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], <vscale x 4 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg6.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], <vscale x 1 x half> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg6.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], <vscale x 2 x half> [[MASKEDOFF4]], <vscale x 2 x half> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg6.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], <vscale x 4 x half> [[MASKEDOFF4]], <vscale x 4 x half> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg6.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], <vscale x 1 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg6.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], <vscale x 2 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], <vscale x 4 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg6.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], <vscale x 1 x i16> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t
bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], 
align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, 
vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tu +// CHECK-RV64-SAME: ( 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , 
, , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 
1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , 
, , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], 
[[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t 
test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, 
ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg6e16_v_u16mf2x6_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg6.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP12]]
+//
+vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg6e16_v_u16m1x6_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP12]]
+//
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c
index 1c65f4b3791e..819078f7e96c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e32.c
@@ -7,531 +7,531 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg6e32_v_f32mf2x6_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg6.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], <vscale x 1 x float> [[MASKEDOFF4]], <vscale x 1 x float> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg6.nxv1f32.i64(<vscale x 1 x float> [[TMP6]], <vscale x 1 x float> [[TMP7]], <vscale x 1 x float> [[TMP8]], <vscale x 1 x float> [[TMP9]], <vscale x 1 x float> [[TMP10]], <vscale x 1 x float> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP12]]
 //
-void test_vlsseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32mf2x6_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg6e32_v_f32m1x6_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg6.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], <vscale x 2 x float> [[MASKEDOFF2]], <vscale x 2 x float> [[MASKEDOFF3]], <vscale x 2 x float> [[MASKEDOFF4]], <vscale x 2 x float> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> }
[[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } 
@test_vlsseg6e32_v_u32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr 
noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t 
vl) { - return __riscv_vlsseg6e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], 
[[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue 
{ , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], 
i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg6e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, 
vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] 
= call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_mu(mask, maskedoff_tuple, base, 
bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, 
vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } 
[[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } 
poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg6.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP12]]
 //
-void test_vlsseg6e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c
index a00fe18c6e94..e2d1c4e424e8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e64.c
@@ -7,267 +7,267 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg6e64_v_f64m1x6_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], <vscale x 1 x double> [[MASKEDOFF5]], ptr [[BASE]],
i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t 
bstride, size_t vl) { + return __riscv_vlsseg6e64_v_u64m1x6_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, 
vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { 
, , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], 
[[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_u64m1x6_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// 
CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, 
size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, 
vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
 //
-void test_vlsseg6e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_v_u64m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c
index 9e50daaf97a3..1ba496685f96 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg6e8.c
@@ -1,712 +1,713 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void
test_vlsseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, 
vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bstride, vl);
-}
-
[Generated CHECK-RV64 bodies elided for the removed tests below, through test_vlsseg6e8_v_u8m1_mu: each checks a call to @llvm.riscv.vlsseg6.mask.nxv{1,2,4,8}i8.i64 taking the six <vscale x N x i8> maskedoff operands, ptr [[BASE]], i64 [[BSTRIDE]], the <vscale x N x i1> mask, i64 [[VL]], and a policy operand (i64 2 for _tum, i64 0 for _tumu, i64 1 for _mu), followed by six extractvalue/store pairs into v0..v5.]
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tum
-void test_vlsseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tum
-void test_vlsseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tum
-void test_vlsseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tumu
-void test_vlsseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tumu
-void test_vlsseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tumu
-void test_vlsseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tumu
-void test_vlsseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tumu
-void test_vlsseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tumu
-void test_vlsseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tumu
-void test_vlsseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tumu
-void test_vlsseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
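The _tum, _tumu, and _mu variants above and below differ only in the trailing policy operand of the generated call (i64 2, i64 0, and i64 1 respectively). A minimal sketch of that mapping, assuming LLVM's usual vector-policy encoding (bit 0 = tail agnostic, bit 1 = mask agnostic); the enum and its names are hypothetical, for illustration only:

/* Hypothetical illustration: values mirror the policy operands visible
   in the CHECK lines of this file. */
enum vlsseg_policy_sketch {
  POLICY_TUMU = 0, /* tail undisturbed, mask undisturbed -> i64 0 (_tumu) */
  POLICY_MU   = 1, /* tail agnostic,    mask undisturbed -> i64 1 (_mu)   */
  POLICY_TUM  = 2, /* tail undisturbed, mask agnostic    -> i64 2 (_tum)  */
};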
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_mu
-void test_vlsseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_mu
-void test_vlsseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_mu
-void test_vlsseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_mu
-void test_vlsseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_mu
-void test_vlsseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_mu
-void test_vlsseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_mu
-void test_vlsseg6e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_mu
-void test_vlsseg6e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
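Taken together, the removed definitions show the old interface: six output pointers plus six separate maskedoff operands per call. A minimal caller sketch of that removed style (function name and the reuse of one maskedoff value are hypothetical; assumes a V-enabled toolchain with the pre-D152136 headers):

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

/* Old-style (removed) interface: results come back through six output
   pointers; the mask-undisturbed sources are six scalars of the same
   vector type. Sizeless RVV types cannot form arrays, hence six locals. */
static vuint8m1_t sum_first_two_old(vbool8_t mask, vuint8m1_t off,
                                    const uint8_t *base, ptrdiff_t bstride,
                                    size_t vl) {
  vuint8m1_t v0, v1, v2, v3, v4, v5;
  __riscv_vlsseg6e8_v_u8m1_mu(&v0, &v1, &v2, &v3, &v4, &v5, mask,
                              off, off, off, off, off, off,
                              base, bstride, vl);
  return __riscv_vadd_vv_u8m1(v0, v1, vl); /* use two of the six fields */
}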
[Generated CHECK-RV64 bodies elided for the new tuple tests below: each packs the six <vscale x N x i8> coerced arguments into the aggregate with insertvalue, unpacks them with extractvalue, checks the call to @llvm.riscv.vlsseg6.nxv{1,2,4,8}i8.i64 (with the <vscale x N x i1> mask and a policy operand for the masked forms), and returns the aggregate.]
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg6e8_v_i8mf8x6_tu
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf8x6_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg6e8_v_i8mf4x6_tu
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf4x6_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_i8mf2x6_tu
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg6e8_v_i8m1x6_tu
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8m1x6_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg6e8_v_u8mf8x6_tu
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf8x6_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg6e8_v_u8mf4x6_tu
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf4x6_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_u8mf2x6_tu
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg6e8_v_u8m1x6_tu
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8m1x6_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg6e8_v_i8mf8x6_tum
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
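Under the tuple interface, the unmasked _tu form just shown takes and returns a single tuple value instead of six pointers. A minimal usage sketch; it assumes the tuple vget intrinsic (__riscv_vget_v_i8m1x6_i8m1) from the companion tuple-type patches is available, and the wrapper name is hypothetical:

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

/* New-style interface: one tuple in (tail-undisturbed source), one tuple
   out; individual fields are read back with vget (index must be a
   compile-time constant). */
static vint8m1_t first_field_new(vint8m1x6_t maskedoff_tuple,
                                 const int8_t *base, ptrdiff_t bstride,
                                 size_t vl) {
  vint8m1x6_t seg = __riscv_vlsseg6e8_v_i8m1x6_tu(maskedoff_tuple,
                                                  base, bstride, vl);
  return __riscv_vget_v_i8m1x6_i8m1(seg, 0);
}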
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg6e8_v_i8mf4x6_tum
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_i8mf2x6_tum
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg6e8_v_i8m1x6_tum
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg6e8_v_u8mf8x6_tum
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg6e8_v_u8mf4x6_tum
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_u8mf2x6_tum
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg6e8_v_u8m1x6_tum
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg6e8_v_i8mf8x6_tumu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg6e8_v_i8mf4x6_tumu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_i8mf2x6_tumu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg6e8_v_i8m1x6_tumu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg6e8_v_u8mf8x6_tumu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg6e8_v_u8mf4x6_tumu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_u8mf2x6_tumu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg6e8_v_u8m1x6_tumu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg6e8_v_i8mf8x6_mu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg6e8_v_i8mf4x6_mu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_i8mf2x6_mu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg6e8_v_i8m1x6_mu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg6e8_v_u8mf8x6_mu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg6e8_v_u8mf4x6_mu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_u8mf2x6_mu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg6e8_v_u8m1x6_mu
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c index f191ed154561..837910f8a44d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e16.c @@ -7,867 +7,867 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlsseg7.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, 
vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, 
vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
-  return __riscv_vlsseg7e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tum
-void test_vlsseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tum
-void test_vlsseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tum
-void test_vlsseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tum
-void test_vlsseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tum
-void test_vlsseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tum
-void test_vlsseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tum
-void test_vlsseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tum
-void test_vlsseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tum
-void test_vlsseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tumu
-void test_vlsseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tumu
-void test_vlsseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tumu
-void test_vlsseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tumu
-void test_vlsseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tumu
-void test_vlsseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tumu
-void test_vlsseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tumu
-void test_vlsseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tumu
-void test_vlsseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tumu
-void test_vlsseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_mu
-void test_vlsseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_mu
-void test_vlsseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_mu
-void test_vlsseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_mu
-void test_vlsseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_mu
-void test_vlsseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_mu
-void test_vlsseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_mu
-void test_vlsseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_mu
-void test_vlsseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_mu
-void test_vlsseg7e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
@test_vlsseg7e16_v_f16mf4x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue 
{ , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], 
[[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tu +// CHECK-RV64-SAME: ( 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: 
ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { 
, , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , 
, , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
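// A minimal caller-side sketch of the tuple-based _tumu interface exercised
// above (assuming <riscv_vector.h>, as these tests include): the masked-off
// operand travels as one vuint16m1x7_t value, and a single field comes back
// out through the tuple vget interface, which this sketch assumes from the
// companion tuple-type patches rather than from this patch itself.
static inline vuint16m1_t first_field_tumu(vbool16_t mask,
                                           vuint16m1x7_t maskedoff_tuple,
                                           const uint16_t *base,
                                           ptrdiff_t bstride, size_t vl) {
  vuint16m1x7_t v = __riscv_vlsseg7e16_v_u16m1x7_tumu(mask, maskedoff_tuple,
                                                      base, bstride, vl);
  return __riscv_vget_v_u16m1x7_u16m1(v, 0); // field 0 of the 7-field segment
}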
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vfloat16m1x7_t 
test_vlsseg7e16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } 
[[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } 
@test_vlsseg7e16_v_u16mf4x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , 
, , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c index 583221914a33..fa8a21ffa63f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e32.c
@@ -7,579 +7,579 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg7e32_v_f32mf2x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg7.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], <vscale x 1 x float> [[MASKEDOFF4]], <vscale x 1 x float> [[MASKEDOFF5]], <vscale x 1 x float> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , 
, , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
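// One reading of the _tu contract pinned down by these tests: elements at
// index vl and beyond keep the values of the maskedoff tuple, so feeding the
// previous result back in refines the same tuple strip by strip. A hedged
// sketch; the helper name is illustrative and not part of this patch.
static inline vfloat32m1x7_t load_segments_tu(vfloat32m1x7_t prev,
                                              const float *base,
                                              ptrdiff_t bstride, size_t n) {
  size_t vl = __riscv_vsetvl_e32m1(n); // may cover only a prefix of n
  return __riscv_vlsseg7e32_v_f32m1x7_tu(prev, base, bstride, vl);
}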
store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// 
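// For reference, and ignoring mask and tail policy, the strided segment load
// these tests check behaves like the scalar loop below: field f of element i
// is read from byte offset i * bstride + f * sizeof(int32_t), i.e. bstride is
// a byte stride between consecutive seven-field segments.
static void vlsseg7e32_scalar_reference(int32_t *out, const int32_t *base,
                                        ptrdiff_t bstride, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    const int32_t *seg = (const int32_t *)((const char *)base + i * bstride);
    for (size_t f = 0; f < 7; ++f)
      out[f * n + i] = seg[f]; // field f fills the f-th tuple member
  }
}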
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t 
*v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_i32mf2x7_tum
 //
-void test_vlsseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_i32m1x7_tum
 //
-void test_vlsseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
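// ---- Illustrative sketch, not part of the patch ----------------------------
// The same masked tuple load through the type-overloaded spelling that the
// overloaded test files in this change exercise; the _i32m1x7 suffix is
// inferred from the maskedoff tuple operand. Assumes the overloaded policy
// name __riscv_vlsseg7e32_tum follows the naming pattern of the suffixed
// intrinsics tested above.
#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

vint32m1x7_t overloaded_tum(vbool32_t mask, vint32m1x7_t maskedoff,
                            const int32_t *base, ptrdiff_t bstride, size_t vl) {
  // Suffix-free spelling; resolves to __riscv_vlsseg7e32_v_i32m1x7_tum.
  return __riscv_vlsseg7e32_tum(mask, maskedoff, base, bstride, vl);
}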
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_u32mf2x7_tum
 //
-void test_vlsseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_u32m1x7_tum
 //
-void test_vlsseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
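// ---- Illustrative sketch, not part of the patch ----------------------------
// Caller-side view of the tuple interface: an unmasked tuple load supplies
// the tail/maskedoff values in one operand, the _tum variant then overwrites
// only the active lanes, and a field is read back by value instead of through
// an out-pointer. __riscv_vget_v_u32m1x7_u32m1 is assumed from the companion
// tuple-type patches in this series.
#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

vuint32m1_t first_segment_tum(vbool32_t mask, const uint32_t *base,
                              ptrdiff_t bstride, size_t vl) {
  // Plain strided segment load: seven fields per segment, segments bstride
  // bytes apart.
  vuint32m1x7_t tail = __riscv_vlsseg7e32_v_u32m1x7(base, bstride, vl);
  // Tail-undisturbed: lanes past vl keep the values from `tail`.
  vuint32m1x7_t v = __riscv_vlsseg7e32_v_u32m1x7_tum(mask, tail, base,
                                                     bstride, vl);
  // Constant-index field access replaces the old v0..v6 out-pointers.
  return __riscv_vget_v_u32m1x7_u32m1(v, 0);
}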
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg7e32_v_f32mf2x7_tumu
 //
-void test_vlsseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg7e32_v_f32m1x7_tumu
 //
-void test_vlsseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_i32mf2x7_tumu
 //
-void test_vlsseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_i32m1x7_tumu
 //
-void test_vlsseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_u32mf2x7_tumu
 //
-void test_vlsseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_u32m1x7_tumu
 //
-void test_vlsseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
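// ---- Illustrative sketch, not part of the patch ----------------------------
// The three masked policy variants differ only in the trailing policy
// immediate visible in the CHECK lines above: _tum lowers with i64 2, _tumu
// with i64 0, and _mu with i64 1. Each takes the whole pass-through state as
// a single maskedoff tuple operand.
#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

vuint32m1x7_t policy_trio(vbool32_t mask, vuint32m1x7_t maskedoff,
                          const uint32_t *base, ptrdiff_t bstride, size_t vl) {
  // Tail undisturbed, mask agnostic (policy immediate 2).
  vuint32m1x7_t a = __riscv_vlsseg7e32_v_u32m1x7_tum(mask, maskedoff, base, bstride, vl);
  // Tail and mask undisturbed (policy immediate 0).
  vuint32m1x7_t b = __riscv_vlsseg7e32_v_u32m1x7_tumu(mask, a, base, bstride, vl);
  // Mask undisturbed, tail agnostic (policy immediate 1).
  return __riscv_vlsseg7e32_v_u32m1x7_mu(mask, b, base, bstride, vl);
}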
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg7e32_v_f32mf2x7_mu
 //
-void test_vlsseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg7e32_v_f32m1x7_mu
 //
-void test_vlsseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_i32mf2x7_mu
 //
-void test_vlsseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_i32m1x7_mu
 //
-void test_vlsseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_u32mf2x7_mu
 //
-void test_vlsseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_u32m1x7_mu
<vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg7.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP14]]
//
-void test_vlsseg7e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e32_v_u32m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e32_v_u32m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c
index bb9b87c2d3f8..609b03156dea 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e64.c
@@ -7,291 +7,291 @@
 #include <riscv_vector.h>
-//
CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( [[TMP7]], 
[[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], 
[[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t 
test_vlsseg7e64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef 
[[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void 
test_vlsseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c index 750ab7baefbf..16024dc5069d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg7e8.c @@ -1,776 +1,777 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, 
vuint8mf2_t *v6, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t 
maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8_tum(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg7e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef 
[[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef 
[[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], 
ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], <vscale x 8 x i8> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg7e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg7e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg7e8_v_i8mf8x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.nxv1i8.i64(<vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e8_v_i8mf8x7_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg7e8_v_i8mf4x7_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:
[[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg7e8_v_i8m1x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf8x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: 
[[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t 
mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_i8m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg7e8_v_i8mf8x7_tumu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg7e8_v_i8mf4x7_tumu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg7.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg7e8_v_i8mf2x7_tumu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg7e8_v_i8m1x7_tumu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg7e8_v_u8mf8x7_tumu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg7e8_v_u8mf4x7_tumu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg7.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg7e8_v_u8mf2x7_tumu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg7e8_v_u8m1x7_tumu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg7e8_v_i8mf8x7_mu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg7e8_v_i8mf4x7_mu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg7.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg7e8_v_i8mf2x7_mu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg7e8_v_i8m1x7_mu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_i8m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg7e8_v_u8mf8x7_mu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg7e8_v_u8mf4x7_mu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg7.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg7e8_v_u8mf2x7_mu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg7e8_v_u8m1x7_mu
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_v_u8m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c
index 9329a3a58e22..b44fd86d1d0e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e16.c
@@ -7,939 +7,939 @@
 #include <riscv_vector.h>
 
[[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], 
align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t 
maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg8e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
[... 26 analogous deleted hunks elided: the CHECK blocks and definitions of test_vlsseg8e16_v_{f16mf2,f16m1,i16mf4,i16mf2,i16m1,u16mf4,u16mf2,u16m1}_tum, the nine _tumu variants, and the nine _mu variants. Each repeats the pattern above, differing only in the intrinsic element type (nxv1f16/nxv2f16/nxv4f16 for f16mf4/f16mf2/f16m1, nxv1i16/nxv2i16/nxv4i16 for the i16/u16 variants), the mask type (vbool64_t/vbool32_t/vbool16_t), and the trailing policy operand (i64 2 for _tum, i64 0 for _tumu, i64 1 for _mu) ...]
align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: 
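The hunks above delete the old interface, which wrote the eight segment values through output pointers and took eight separate maskedoff operands; the hunks below switch the same tests to the tuple-type interface, where a single vfloat16mf4x8_t (and friends) carries all eight fields. A minimal usage sketch of the new shape follows; it assumes the tuple vget intrinsic (__riscv_vget_v_f16mf4x8_f16mf4) from the companion patches in this series, and the helper name load_first_field is illustrative only, not part of this diff:

    #include <riscv_vector.h>

    // Sketch: strided segment load through the tuple interface, then
    // extract field 0. With the old interface this required eight output
    // pointers plus eight maskedoff arguments.
    static vfloat16mf4_t load_first_field(vfloat16mf4x8_t tail,
                                          const _Float16 *base,
                                          ptrdiff_t bstride, size_t vl) {
      vfloat16mf4x8_t v =
          __riscv_vlsseg8e16_v_f16mf4x8_tu(tail, base, bstride, vl);
      // Assumed companion intrinsic for pulling one field out of a tuple.
      return __riscv_vget_v_f16mf4x8_f16mf4(v, 0);
    }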
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, [...] } @test_vlsseg8e16_v_f16mf4x8_tu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 1 x half>, [...] } @llvm.riscv.vlsseg8.nxv1f16.i64(<vscale x 1 x half> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:  ret { <vscale x 1 x half>, [...] } [[TMP16]]
+//
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_f16mf4x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, [...] } @test_vlsseg8e16_v_f16mf2x8_tu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 2 x half>, [...] } @llvm.riscv.vlsseg8.nxv2f16.i64(<vscale x 2 x half> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:  ret { <vscale x 2 x half>, [...] } [[TMP16]]
+//
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_f16mf2x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, [...] } @test_vlsseg8e16_v_f16m1x8_tu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 4 x half>, [...] } @llvm.riscv.vlsseg8.nxv4f16.i64(<vscale x 4 x half> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:  ret { <vscale x 4 x half>, [...] } [[TMP16]]
+//
+vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_f16m1x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, [...] } @test_vlsseg8e16_v_i16mf4x8_tu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 1 x i16>, [...] } @llvm.riscv.vlsseg8.nxv1i16.i64(<vscale x 1 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:  ret { <vscale x 1 x i16>, [...] } [[TMP16]]
+//
+vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_i16mf4x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, [...] } @test_vlsseg8e16_v_i16mf2x8_tu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 2 x i16>, [...] } @llvm.riscv.vlsseg8.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:  ret { <vscale x 2 x i16>, [...] } [[TMP16]]
+//
+vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_i16mf2x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, [...] } @test_vlsseg8e16_v_i16m1x8_tu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 4 x i16>, [...] } @llvm.riscv.vlsseg8.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:  ret { <vscale x 4 x i16>, [...] } [[TMP16]]
+//
+vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_i16m1x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, [...] } @test_vlsseg8e16_v_u16mf4x8_tu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 1 x i16>, [...] } @llvm.riscv.vlsseg8.nxv1i16.i64(<vscale x 1 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:  ret { <vscale x 1 x i16>, [...] } [[TMP16]]
+//
+vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_u16mf4x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, [...] } @test_vlsseg8e16_v_u16mf2x8_tu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 2 x i16>, [...] } @llvm.riscv.vlsseg8.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:  ret { <vscale x 2 x i16>, [...] } [[TMP16]]
+//
+vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_u16mf2x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, [...] } @test_vlsseg8e16_v_u16m1x8_tu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 4 x i16>, [...] } @llvm.riscv.vlsseg8.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:  ret { <vscale x 4 x i16>, [...] } [[TMP16]]
+//
+vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_u16m1x8_tu(maskedoff_tuple, base, bstride, vl);
+}
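The _tum variants below take a mask operand and lower to the masked @llvm.riscv.vlsseg8.mask.* intrinsics with policy operand i64 2 (tail undisturbed, mask agnostic), where the _tu tests above lowered to the unmasked @llvm.riscv.vlsseg8.* calls. A short sketch of a masked call, using intrinsics added by this patch but hypothetical wrapper and variable names:

    #include <riscv_vector.h>

    // Sketch: masked tuple load. Tail elements keep the values from
    // maskedoff_tuple (tail undisturbed); masked-off elements are
    // agnostic under _tum, hence the trailing `i64 2` in the IR call.
    static vint16m1x8_t masked_load(vbool16_t mask, vint16m1x8_t maskedoff_tuple,
                                    const int16_t *base, ptrdiff_t bstride,
                                    size_t vl) {
      return __riscv_vlsseg8e16_v_i16m1x8_tum(mask, maskedoff_tuple,
                                              base, bstride, vl);
    }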
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, [...] } @test_vlsseg8e16_v_f16mf4x8_tum
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 1 x half>, [...] } @llvm.riscv.vlsseg8.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:  ret { <vscale x 1 x half>, [...] } [[TMP16]]
+//
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, [...] } @test_vlsseg8e16_v_f16mf2x8_tum
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 2 x half>, [...] } @llvm.riscv.vlsseg8.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:  ret { <vscale x 2 x half>, [...] } [[TMP16]]
+//
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, [...] } @test_vlsseg8e16_v_f16m1x8_tum
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 4 x half>, [...] } @llvm.riscv.vlsseg8.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:  ret { <vscale x 4 x half>, [...] } [[TMP16]]
+//
+vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_f16m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, [...] } @test_vlsseg8e16_v_i16mf4x8_tum
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 1 x i16>, [...] } @llvm.riscv.vlsseg8.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:  ret { <vscale x 1 x i16>, [...] } [[TMP16]]
+//
+vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, [...] } @test_vlsseg8e16_v_i16mf2x8_tum
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 2 x i16>, [...] } @llvm.riscv.vlsseg8.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:  ret { <vscale x 2 x i16>, [...] } [[TMP16]]
+//
+vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, [...] } @test_vlsseg8e16_v_i16m1x8_tum
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 4 x i16>, [...] } @llvm.riscv.vlsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:  ret { <vscale x 4 x i16>, [...] } [[TMP16]]
+//
+vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_i16m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, [...] } @test_vlsseg8e16_v_u16mf4x8_tum
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 1 x i16>, [...] } @llvm.riscv.vlsseg8.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:  ret { <vscale x 1 x i16>, [...] } [[TMP16]]
+//
+vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, [...] } @test_vlsseg8e16_v_u16mf2x8_tum
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 2 x i16>, [...] } @llvm.riscv.vlsseg8.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:  ret { <vscale x 2 x i16>, [...] } [[TMP16]]
+//
+vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, [...] } @test_vlsseg8e16_v_u16m1x8_tum
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 4 x i16>, [...] } @llvm.riscv.vlsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:  ret { <vscale x 4 x i16>, [...] } [[TMP16]]
+//
+vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_u16m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
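The _tumu variants that follow differ from _tum only in the policy operand of the generated call: i64 0 (tail undisturbed, mask undisturbed), so masked-off elements also keep their values from the passthrough tuple. A sketch under the same assumptions as above, with hypothetical wrapper and variable names:

    #include <riscv_vector.h>

    // Sketch: _tumu preserves both tail lanes and masked-off lanes from
    // the passthrough tuple; the generated IR call ends in `i64 0`.
    static vint16m1x8_t tumu_load(vbool16_t mask, vint16m1x8_t passthru,
                                  const int16_t *base, ptrdiff_t bstride,
                                  size_t vl) {
      return __riscv_vlsseg8e16_v_i16m1x8_tumu(mask, passthru,
                                               base, bstride, vl);
    }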
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, [...] } @test_vlsseg8e16_v_f16mf4x8_tumu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 1 x half>, [...] } @llvm.riscv.vlsseg8.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:  ret { <vscale x 1 x half>, [...] } [[TMP16]]
+//
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, [...] } @test_vlsseg8e16_v_f16mf2x8_tumu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 2 x half>, [...] } @llvm.riscv.vlsseg8.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:  ret { <vscale x 2 x half>, [...] } [[TMP16]]
+//
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, [...] } @test_vlsseg8e16_v_f16m1x8_tumu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 4 x half>, [...] } @llvm.riscv.vlsseg8.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:  ret { <vscale x 4 x half>, [...] } [[TMP16]]
+//
+vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, [...] } @test_vlsseg8e16_v_i16mf4x8_tumu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 1 x i16>, [...] } @llvm.riscv.vlsseg8.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:  ret { <vscale x 1 x i16>, [...] } [[TMP16]]
+//
+vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, [...] } @test_vlsseg8e16_v_i16mf2x8_tumu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 2 x i16>, [...] } @llvm.riscv.vlsseg8.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:  ret { <vscale x 2 x i16>, [...] } [[TMP16]]
+//
+vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, [...] } @test_vlsseg8e16_v_i16m1x8_tumu
 [...]
+// CHECK-RV64:       [[TMP16:%.*]] = call { <vscale x 4 x i16>, [...] } @llvm.riscv.vlsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], [...], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:  ret { <vscale x 4 x i16>, [...] } [[TMP16]]
+//
+vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, [...] } @test_vlsseg8e16_v_u16mf4x8_tumu
 [...]
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i16>,
, , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , 
, , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], 
ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP16]]
+//
+vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_v_u16m1x8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c
index 6bf3968d1b56..f4d36e18ab8b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e32.c
@@ -7,627 +7,627 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg8e32_v_f32mf2x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg8.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], <vscale x 1 x float> [[MASKEDOFF4]], <vscale x 1 x float> [[MASKEDOFF5]], <vscale x 1 x float> [[MASKEDOFF6]], <vscale x 1 x float> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+//
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr 
[[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); 
+vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_u32mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_u32mf2x8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_u32m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_u32m1x8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , }
@llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_f32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_f32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_f32m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-//
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_i32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_i32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_i32m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] =
extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_u32mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_u32m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_u32m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-//
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_f32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
 //
-void test_vlsseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_f32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]],
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, 
const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } 

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, ... } @test_vlsseg8e32_v_u32m1x8_tumu
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: same rewrite as the i32m1_tumu hunk above, with <vscale x 2 x i32> elements, <vscale x 2 x i1> mask, @llvm.riscv.vlsseg8.mask.nxv2i32.i64, policy operand i64 0 ...]
//
-void test_vlsseg8e32_v_u32m1_tumu(vuint32m1_t *v0, ..., vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, ..., vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_u32m1_tumu(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, ... } @test_vlsseg8e32_v_f32mf2x8_mu
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: same rewrite as the i32m1_tumu hunk above, with <vscale x 1 x float> elements, <vscale x 1 x i1> mask, @llvm.riscv.vlsseg8.mask.nxv1f32.i64, policy operand i64 1 ...]
//
-void test_vlsseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, ..., vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, ..., vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_f32mf2_mu(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, ... } @test_vlsseg8e32_v_f32m1x8_mu
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: same rewrite as the i32m1_tumu hunk above, with <vscale x 2 x float> elements, <vscale x 2 x i1> mask, @llvm.riscv.vlsseg8.mask.nxv2f32.i64, policy operand i64 1 ...]
//
-void test_vlsseg8e32_v_f32m1_mu(vfloat32m1_t *v0, ..., vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, ..., vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_f32m1_mu(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_f32m1x8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, ... } @test_vlsseg8e32_v_i32mf2x8_mu
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: same rewrite as the i32m1_tumu hunk above, with <vscale x 1 x i32> elements, <vscale x 1 x i1> mask, @llvm.riscv.vlsseg8.mask.nxv1i32.i64, policy operand i64 1 ...]
//
-void test_vlsseg8e32_v_i32mf2_mu(vint32mf2_t *v0, ..., vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, ..., vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_i32mf2_mu(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, ... } @test_vlsseg8e32_v_i32m1x8_mu
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: same rewrite as the i32m1_tumu hunk above, with <vscale x 2 x i32> elements, <vscale x 2 x i1> mask, @llvm.riscv.vlsseg8.mask.nxv2i32.i64, policy operand i64 1 ...]
//
-void test_vlsseg8e32_v_i32m1_mu(vint32m1_t *v0, ..., vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, ..., vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_i32m1_mu(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_i32m1x8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, ... } @test_vlsseg8e32_v_u32mf2x8_mu
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: same rewrite as the i32m1_tumu hunk above, with <vscale x 1 x i32> elements, <vscale x 1 x i1> mask, @llvm.riscv.vlsseg8.mask.nxv1i32.i64, policy operand i64 1 ...]
//
-void test_vlsseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, ..., vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, ..., vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_u32mf2_mu(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, ... } @test_vlsseg8e32_v_u32m1x8_mu
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: same rewrite as the i32m1_tumu hunk above, with <vscale x 2 x i32> elements, <vscale x 2 x i1> mask, @llvm.riscv.vlsseg8.mask.nxv2i32.i64, policy operand i64 1 ...]
//
-void test_vlsseg8e32_v_u32m1_mu(vuint32m1_t *v0, ..., vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, ..., vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_v_u32m1_mu(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_v_u32m1x8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c
index 2a10530070b3..eb0eee2825fb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e64.c
@@ -7,315 +7,315 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, ... } @test_vlsseg8e64_v_f64m1x8_tu
[... CHECK-RV64-SAME (#[[ATTR0:[0-9]+]]) and CHECK-RV64-NEXT lines elided: unmasked (tu) form of the pattern above, @llvm.riscv.vlsseg8.nxv1f64.i64 with no mask or policy operand, <vscale x 1 x double> elements, align 8 ...]
//
-void test_vlsseg8e64_v_f64m1_tu(vfloat64m1_t *v0, ..., vfloat64m1_t *v7, vfloat64m1_t maskedoff0, ..., vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64_v_f64m1_tu(v0, ..., v7, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64_v_f64m1x8_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, ... } @test_vlsseg8e64_v_i64m1x8_tu
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: unmasked (tu) form of the pattern above, @llvm.riscv.vlsseg8.nxv1i64.i64 with no mask or policy operand, <vscale x 1 x i64> elements, align 8 ...]
//
-void test_vlsseg8e64_v_i64m1_tu(vint64m1_t *v0, ..., vint64m1_t *v7, vint64m1_t maskedoff0, ..., vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64_v_i64m1_tu(v0, ..., v7, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64_v_i64m1x8_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, ... } @test_vlsseg8e64_v_u64m1x8_tu
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: unmasked (tu) form of the pattern above, @llvm.riscv.vlsseg8.nxv1i64.i64 with no mask or policy operand, <vscale x 1 x i64> elements, align 8 ...]
//
-void test_vlsseg8e64_v_u64m1_tu(vuint64m1_t *v0, ..., vuint64m1_t *v7, vuint64m1_t maskedoff0, ..., vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64_v_u64m1_tu(v0, ..., v7, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64_v_u64m1x8_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, ... } @test_vlsseg8e64_v_f64m1x8_tum
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: same rewrite as the masked hunks above, with <vscale x 1 x double> elements, <vscale x 1 x i1> mask, @llvm.riscv.vlsseg8.mask.nxv1f64.i64, policy operand i64 2 ...]
//
-void test_vlsseg8e64_v_f64m1_tum(vfloat64m1_t *v0, ..., vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, ..., vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64_v_f64m1_tum(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64_v_f64m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, ... } @test_vlsseg8e64_v_i64m1x8_tum
[... CHECK-RV64-SAME and CHECK-RV64-NEXT lines elided: same rewrite as the masked hunks above, with <vscale x 1 x i64> elements, <vscale x 1 x i1> mask, @llvm.riscv.vlsseg8.mask.nxv1i64.i64, policy operand i64 2 ...]
//
-void test_vlsseg8e64_v_i64m1_tum(vint64m1_t *v0, ..., vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, ..., vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64_v_i64m1_tum(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, bstride, vl);
+vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64_v_i64m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tum
[[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
[[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , 
, , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , 
} @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_mu(mask, maskedoff_tuple, 
base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c
index 1d90ed15e07e..115b446ccb52 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vlsseg8e8.c
@@ -1,840 +1,841 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], <vscale x 1 x i8> [[MASKEDOFF6]], <vscale x 1 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tu
-//
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], 
align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t 
maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg8e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// 
CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr 
[[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, 
vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2_tumu(v0, 
v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], 
i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg8e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// 
CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// 
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], <vscale x 8 x i8> [[MASKEDOFF6]], <vscale x 8 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8m1_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg8e8_v_i8mf8x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf8x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_i8mf4x8_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.nxv2i8.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP16]]
+//
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf4x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_i8mf2x8_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.nxv4i8.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP16]]
+//
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf2x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg8e8_v_i8m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.nxv8i8.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP16]]
+//
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8m1x8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg8e8_v_u8mf8x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vuint8mf8x8_t
test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , 
} [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , 
, , , , } @test_vlsseg8e8_v_i8m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue 
{ , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } 
@test_vlsseg8e8_v_i8mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], 
[[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP16]]
+//
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8m1x8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c
index 8e5c40dedf0e..bb4c2473b5eb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e16.c
@@ -7,843 +7,843 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.nxv1f16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.nxv4f16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
 //
-void test_vlsseg2e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m1x2_t
test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64( [[TMP2]], [[TMP3]], 
ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_tu(vfloat16m4_t *v0, vfloat16m4_t *v1, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_tu(vint16m4_t *v0, vint16m4_t *v1, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_tu +// CHECK-RV64-SAME: ( 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t maskedoff0, vuint16m2_t 
maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_tu(vuint16m4_t *v0, vuint16m4_t *v1, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg2e16_v_f16m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_tum(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } 
@test_vlsseg2e16_v_i16m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_tum(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, 
maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_tum(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); 
} -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( 
[[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_f16m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_f16m4_tumu(vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, 
const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_tumu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP4]]
//
-void test_vlsseg2e16_v_u16m4_tumu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg2e16_v_f16mf4x2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg2e16_v_f16mf2x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg2e16_v_f16m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg2e16_v_f16m2x2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_f16m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x half>, <vscale x 16 x half> } @test_vlsseg2e16_v_f16m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0]], <vscale x 16 x half> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } poison, <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], <vscale x 16 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[TMP2]], <vscale x 16 x half> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP4]]
//
-void test_vlsseg2e16_v_f16m4_mu(vfloat16m4_t *v0, vfloat16m4_t *v1,
vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_i16m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_i16m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_i16m4_mu(vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, 
size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e16_v_u16m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e16_v_u16m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e16_v_u16m4_mu(vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c index 4a7d3ce81bc5..939a98caf3fe 100644 
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32.c
@@ -7,675 +7,675 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]]
//
-void test_vlsseg2e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg2e32_v_f32m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> }
@llvm.riscv.vlsseg2.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tu(vfloat32m4_t *v0, vfloat32m4_t *v1, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } 
@test_vlsseg2e32_v_i32m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_tu(vint32m4_t *v0, vint32m4_t *v1, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tu(vuint32m2_t 
*v0, vuint32m2_t *v1, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tu(vuint32m4_t *v0, vuint32m4_t *v1, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } 
[[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tum(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], 
i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_tum(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , } @test_vlsseg2e32_v_u32m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, 
mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tum(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_tumu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, 
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], 
<vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_i32m2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_i32m4x2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0]], <vscale x 8 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_i32m4_tumu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg2e32_v_u32mf2x2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32m4_tumu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_f32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_f32m4_mu(vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, 
maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( 
[[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_i32m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_i32m4_mu(vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg2e32_v_u32m1x2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg2e32_v_u32m2x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e32_v_u32m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i32>, <vscale x 8 x i32> } @test_vlsseg2e32_v_u32m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0]], <vscale x 8 x i32> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vlsseg2.mask.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP4]]
 //
-void test_vlsseg2e32_v_u32m4_mu(vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32_tuple.c
deleted file mode 100644
index e2e22416df76..000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e32_tuple.c
+++ /dev/null
@@ -1,680 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
-// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg2e32_v_f32mf2x2_tu
-// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.nxv1f32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]]
-//
-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg2e32_v_f32m1x2_tu
-// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.nxv2f32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP4]]
-//
-vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x float>, <vscale x 4 x float> } @test_vlsseg2e32_v_f32m2x2_tu
-// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.nxv4f32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP4]]
-//
-vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x float>, <vscale x 8 x float> } @test_vlsseg2e32_v_f32m4x2_tu
-// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.nxv8f32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP4]]
-//
-vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float *base,
bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 
0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tu -// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tu -// 
CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tu(maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t 
test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tum -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tum(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t 
test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_tumu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_tumu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m1x2_t 
test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_f32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_i32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32mf2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m1x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m2x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e32_v_u32m4x2_mu -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: ret { , } [[TMP4]] -// -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, 
vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tuple_mu(mask, maskedoff_tuple, base, bstride, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c
index 34d00f525c3e..64e0eb4076ca 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e64.c
@@ -7,507 +7,507 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg2e64_v_f64m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.nxv1f64.i64(<vscale x 1 x double> [[TMP2]], <vscale x 1 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x double>, <vscale x 2 x double> } @test_vlsseg2e64_v_f64m2x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0]], <vscale x 2 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlsseg2.nxv2f64.i64(<vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x double>, <vscale x 4 x double> } @test_vlsseg2e64_v_f64m4x2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0]], <vscale x 4 x double> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } poison, <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], <vscale x 4 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.nxv4f64.i64(<vscale x 4 x double> [[TMP2]], <vscale x 4 x double> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP4]]
 //
-void test_vlsseg2e64_v_f64m4_tu(vfloat64m4_t *v0, vfloat64m4_t *v1, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg2e64_v_i64m1x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg2.nxv1i64.i64(
[[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , } @test_vlsseg2e64_v_i64m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_tu(vint64m4_t *v0, vint64m4_t *v1, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m4_tu(vuint64m4_t 
*v0, vuint64m4_t *v1, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_tum(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t 
bstride, size_t vl) { + return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_tum(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m4_tum(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_tumu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t 
maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_tumu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], 
i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m4_tumu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_f64m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_f64m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_f64m4_mu(vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_i64m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_i64m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_i64m4_mu(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const 
int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e64_v_u64m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e64_v_u64m4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i64>, <vscale x 4 x i64> } @test_vlsseg2e64_v_u64m4x2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0]], <vscale x 4 x i64> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP4]]
 //
-void test_vlsseg2e64_v_u64m4_mu(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c
index c5b31c38ce96..c0b6cb32e6e0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg2e8.c
@@ -1,680 +1,681 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg2e8_v_i8mf8x2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.nxv1i8.i64(<vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]]
 //
-void test_vlsseg2e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg2e8_v_i8mf4x2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 0
+// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr 
[[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_tu(vint8m4_t *v0, vint8m4_t *v1, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } 
@test_vlsseg2e8_v_u8mf4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, 
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t 
*base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m4_tu(vuint8m4_t *v0, vuint8m4_t *v1, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(v0, v1, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_tum(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local 
void @test_vlsseg2e8_v_u8m4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m4_tum(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_tumu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t 
maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m4_tumu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf8x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, 
vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_i8m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_i8m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_i8m4_mu(vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf8_mu -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf8x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void 
test_vlsseg2e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8mf2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m1x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// 
CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m2x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , } [[TMP4]] // -void test_vlsseg2e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg2e8_v_u8m4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , } @test_vlsseg2e8_v_u8m4x2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0]], <vscale x 32 x i8> [[MASKEDOFF1]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } poison, <vscale x 32 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], <vscale x 32 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8> [[TMP3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP4]]
 //
-void test_vlsseg2e8_v_u8m4_mu(vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_mu(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c
index 9a6e4218ef78..87b08b269710 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e16.c
@@ -7,771 +7,771 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg3e16_v_f16mf4x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:
[[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_f16m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.nxv8f16.i64(<vscale x 8 x half> [[TMP3]], <vscale x 8 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_i16mf4x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_i16mf2x3_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_i16m1x3_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_i16m2x3_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_u16mf4x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.nxv1i16.i64(<vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_u16mf2x3_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.nxv2i16.i64(<vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_u16m1x3_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.nxv4i16.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_u16m2x3_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.nxv8i16.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg3e16_v_f16mf4x3_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP3]], <vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg3e16_v_f16mf2x3_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP3]], <vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg3e16_v_f16m1x3_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP3]], <vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg3e16_v_f16m2x3_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], <vscale x 8 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[TMP3]], <vscale x 8 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_i16mf4x3_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_i16mf2x3_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_i16m1x3_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_i16m2x3_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_u16mf4x3_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_u16mf2x3_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_u16m1x3_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_u16m2x3_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg3e16_v_f16mf4x3_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP3]], <vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg3e16_v_f16mf2x3_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP3]], <vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg3e16_v_f16m1x3_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP3]], <vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_f16m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @test_vlsseg3e16_v_f16m2x3_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], <vscale x 8 x half> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], <vscale x 8 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[TMP3]], <vscale x 8 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP6]]
 //
-void test_vlsseg3e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg3e16_v_i16mf4x3_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg3e16_v_i16mf2x3_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg3e16_v_i16m1x3_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_i16m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @test_vlsseg3e16_v_i16m2x3_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP6]]
 //
-void test_vlsseg3e16_v_i16m2_tumu(vint16m2_t *v0,
vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf4x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e16_v_u16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e16_v_u16m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + 
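All of the _tumu hunks above make the same two changes: the three output pointers (v0/v1/v2) disappear in favor of a by-value tuple return, and the three separate maskedoff operands collapse into one maskedoff tuple. A minimal sketch of caller code against the new interface follows; the vget tuple accessor is assumed from the companion tuple-support patches in this series, not defined by this patch:

#include <riscv_vector.h>

/* Sketch: consume all three fields of a strided 3-segment load under the
   tail-undisturbed/mask-undisturbed (_tumu) policy. */
void sum_fields(vbool16_t mask, vint16m1x3_t maskedoff, const int16_t *base,
                ptrdiff_t bstride, int16_t *out, size_t vl) {
  vint16m1x3_t v = __riscv_vlsseg3e16_tumu(mask, maskedoff, base, bstride, vl);
  /* Assumed vget accessor for tuple types. */
  vint16m1_t f0 = __riscv_vget_v_i16m1x3_i16m1(v, 0);
  vint16m1_t f1 = __riscv_vget_v_i16m1x3_i16m1(v, 1);
  vint16m1_t f2 = __riscv_vget_v_i16m1x3_i16m1(v, 2);
  vint16m1_t s = __riscv_vadd_vv_i16m1(__riscv_vadd_vv_i16m1(f0, f1, vl), f2, vl);
  __riscv_vse16_v_i16m1(out, s, vl);
}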
[...]
-void test_vlsseg3e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
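The _mu bodies here are identical to the _tumu ones except for the trailing immediate on the intrinsic call: the policy operand encodes tail agnosticism in bit 0 and mask agnosticism in bit 1, so _tumu lowers to i64 0 and _mu (tail-agnostic, mask-undisturbed) lowers to i64 1. At the C level the two variants take the same arguments, as this small sketch shows:

#include <riscv_vector.h>

/* Same C signature, different lowered policy immediate. */
vint16m1x3_t load_tumu(vbool16_t m, vint16m1x3_t off, const int16_t *p,
                       ptrdiff_t s, size_t vl) {
  return __riscv_vlsseg3e16_tumu(m, off, p, s, vl); /* policy operand: i64 0 */
}

vint16m1x3_t load_mu(vbool16_t m, vint16m1x3_t off, const int16_t *p,
                     ptrdiff_t s, size_t vl) {
  return __riscv_vlsseg3e16_mu(m, off, p, s, vl);   /* policy operand: i64 1 */
}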
[...]
-void test_vlsseg3e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
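Before the next file, a note on the operation itself: vlsseg3 is a strided segment load. Each segment is three consecutive elements starting at one address, and consecutive segments are bstride bytes (not elements) apart. A sketch of the typical array-of-structs use, assuming the plain non-policy tuple-returning form added earlier in this series; the record layout here is hypothetical:

#include <stddef.h>
#include <riscv_vector.h>

/* Hypothetical packed record with three adjacent int16_t fields. */
struct sample { int16_t x, y, z, pad; };

vint16m1x3_t load_xyz(const struct sample *a, size_t vl) {
  /* bstride is a byte stride between segments: one whole record here. */
  return __riscv_vlsseg3e16_v_i16m1x3((const int16_t *)&a[0].x,
                                      (ptrdiff_t)sizeof(struct sample), vl);
}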
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c
index f1b315f863ec..2048eb56fbc8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e32.c
@@ -7,579 +7,579 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg3e32_v_f32mf2x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.nxv1f32.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]]
 //
-void test_vlsseg3e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl);
 }

[...]
-void test_vlsseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl);
 }
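The _tu tests in this file are the unmasked tail-undisturbed variants: the maskedoff tuple only supplies values for elements past vl, so the call lowers to the plain llvm.riscv.vlsseg3 intrinsic with no mask operand and no policy immediate. A minimal usage sketch:

#include <riscv_vector.h>

/* Elements past vl keep the corresponding values from `off`. */
vfloat32m1x3_t load_tail_undisturbed(vfloat32m1x3_t off, const float *base,
                                     ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg3e32_tu(off, base, bstride, vl);
}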
+// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// 
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.nxv2i32.i64(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]]
 //
-void test_vlsseg3e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg3e32_v_u32m2x3_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0]], <vscale x 4 x i32> [[MASKEDOFF1]], <vscale x 4 x i32> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.nxv4i32.i64(<vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP6]]
 //
-void test_vlsseg3e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg3e32_v_f32mf2x3_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]]
 //
-void test_vlsseg3e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg3e32_v_f32m1x3_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg3.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], <vscale x 2 x float> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue {
, , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const 
float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , , } @test_vlsseg3e32_v_u32m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32mf2x3_t 
test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret 
void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_f32m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_i32m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32mf2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e32_v_u32m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// 
CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP6]]
 //
-void test_vlsseg3e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c
index 4f399d307f15..61ed04bcc53f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e64.c
@@ -7,387 +7,387 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg3e64_v_f64m1x3_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg3.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg3.nxv1f64.i64(<vscale x 1 x double> [[TMP3]], <vscale x 1 x double> [[TMP4]], <vscale x 1 x double> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]]
 //
-void test_vlsseg3e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vfloat64m1x3_t
test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t 
*base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], 
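// ---------------------------------------------------------------------------
// Editorial sketch (not part of the autogenerated checks; assumes
// riscv_vector.h is included as in the sketch further above). The
// tail-undisturbed (_tu) overloads in this file get the same reshaping: the
// passthrough tuple replaces three maskedoff vectors, and elements past vl
// keep the passthrough values. Mirroring the u64m1x3 test at this point:
vuint64m1x3_t keep_tail(vuint64m1x3_t maskedoff, const uint64_t *base,
                        ptrdiff_t bstride, size_t vl) {
  // Unmasked _tu form: no mask operand, and the IR call carries no trailing
  // policy constant (visible in the unmasked @llvm.riscv.vlsseg3 checks here).
  return __riscv_vlsseg3e64_tu(maskedoff, base, bstride, vl);
}
// ---------------------------------------------------------------------------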
i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } 
@test_vlsseg3e64_v_i64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const 
uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @test_vlsseg3e64_v_u64m2x3_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0]], <vscale x 2 x i64> [[MASKEDOFF1]], <vscale x 2 x i64> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP6]]
 //
-void test_vlsseg3e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg3e64_v_f64m1x3_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg3.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:
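// ---------------------------------------------------------------------------
// Editorial note and sketch (not part of the autogenerated checks; assumes
// riscv_vector.h is included). The trailing i64 constant on the masked
// intrinsic calls is the policy operand and is untouched by this patch; from
// the checks in this file: _tum passes 2 (tail undisturbed, mask agnostic),
// _tumu passes 0 (tail and mask both undisturbed), and _mu passes 1 (tail
// agnostic, mask undisturbed). Only the route the passthrough takes into the
// call changes -- one tuple instead of three maskedoff operands:
vfloat64m1x3_t keep_tail_and_mask(vbool64_t mask, vfloat64m1x3_t maskedoff,
                                  const double *base, ptrdiff_t bstride,
                                  size_t vl) {
  // _tumu overload, mirroring the f64m1x3 test that follows this point.
  return __riscv_vlsseg3e64_tumu(mask, maskedoff, base, bstride, vl);
}
// ---------------------------------------------------------------------------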
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], 
ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t 
test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_f64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_f64m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr 
[[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_i64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_i64m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m1x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e64_v_u64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e64_v_u64m2x3_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } 
poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c index 7e74f5efb5ba..4166c5f3d94e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg3e8.c @@ -1,648 +1,649 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tu -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf8x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf4x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8mf2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m1x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_u8m2x3_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(v0, v1, v2, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf8x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf4x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8mf2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( 
[[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m1x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , } @test_vlsseg3e8_v_i8m2x3_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , } [[TMP6]] // -void test_vlsseg3e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_u8mf8x3_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_u8mf4x3_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_u8mf2x3_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_u8m1x3_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_u8m2x3_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tum(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_i8mf8x3_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_i8mf4x3_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_i8mf2x3_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_i8m1x3_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_i8m2x3_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_u8mf8x3_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_u8mf4x3_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_u8mf2x3_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_u8m1x3_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_u8m2x3_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_tumu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf8_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_i8mf8x3_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_i8mf4x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_i8mf2x3_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_i8m1x3_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_i8m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_i8m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf8_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg3e8_v_u8mf8x3_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg3e8_v_u8mf4x3_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg3e8_v_u8mf2x3_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg3e8_v_u8m1x3_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg3e8_v_u8m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg3e8_v_u8m2x3_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP6]]
 //
-void test_vlsseg3e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_mu(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
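The interface change above is easier to read at a call site than in the autogenerated checks. The following sketch is not part of the patch; it illustrates, under the assumption that the tuple accessor __riscv_vget_v_u8mf8x3_u8mf8 and the __riscv_vadd_vv_u8mf8 intrinsic are available in the riscv_vector.h revision this series targets, how a caller migrates from the pointer-output interface to the tuple interface:

#include <riscv_vector.h>

// Old interface: three output pointers plus three separate maskedoff operands.
//   void f(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, ...);
// New interface: one vuint8mf8x3_t tuple in, one tuple out.
vuint8mf8_t sum_fields(vbool64_t mask, vuint8mf8x3_t maskedoff,
                       const uint8_t *base, ptrdiff_t bstride, size_t vl) {
  // Tail-undisturbed, mask-undisturbed strided load of 3-field segments.
  vuint8mf8x3_t seg = __riscv_vlsseg3e8_tum(mask, maskedoff, base, bstride, vl);
  // Extract each field from the tuple (accessor names assumed as above).
  vuint8mf8_t f0 = __riscv_vget_v_u8mf8x3_u8mf8(seg, 0);
  vuint8mf8_t f1 = __riscv_vget_v_u8mf8x3_u8mf8(seg, 1);
  vuint8mf8_t f2 = __riscv_vget_v_u8mf8x3_u8mf8(seg, 2);
  return __riscv_vadd_vv_u8mf8(__riscv_vadd_vv_u8mf8(f0, f1, vl), f2, vl);
}

With the tuple return, the loaded fields stay in SSA values instead of round-tripping through the three output pointers, which is visible in the new CHECK lines above.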
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c
index 749debacf6ba..12a6dff404c2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e16.c
@@ -7,867 +7,867 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0]], <vscale x 8 x half> [[MASKEDOFF1]], <vscale x 8 x half> [[MASKEDOFF2]], <vscale x 8 x half> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16m2_tu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m2_tu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, 
vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m2_tu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m2_tum(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// 
CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m2_tum(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], 
align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m2_tum(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, 
vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m2_tumu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef 
[[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], 
[[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_i16m2_tumu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, 
const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_u16m2_tumu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_f16m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_f16m2_mu(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_i16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_i16m2_mu(vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e16_v_u16m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0]], <vscale x 8 x i16> [[MASKEDOFF1]], <vscale x 8 x i16> [[MASKEDOFF2]], <vscale x 8 x i16> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e16_v_u16m2_mu(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg4e16_v_f16mf4x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg4.nxv1f16.i64(<vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP8]]
+//
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg4e16_v_f16mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.nxv2f16.i64(<vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], <vscale x 2 x half> [[TMP6]], <vscale x 2 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP8]]
+//
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg4e16_v_f16m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg4.nxv4f16.i64(<vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], <vscale x 4 x half> [[TMP6]], <vscale x 4 x half> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP8]]
+//
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+//
CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t 
maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , 
, } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], 
[[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_u16m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , 
, , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_f16m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e16_v_i16mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP8]]
+//
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 4 x i16> and @llvm.riscv.vlsseg4.mask.nxv4i16.i64(..., i64 0) ...]
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 8 x i16> and @llvm.riscv.vlsseg4.mask.nxv8i16.i64(..., i64 0) ...]
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 1 x i16> and @llvm.riscv.vlsseg4.mask.nxv1i16.i64(..., i64 0) ...]
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 2 x i16> and @llvm.riscv.vlsseg4.mask.nxv2i16.i64(..., i64 0) ...]
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 4 x i16> and @llvm.riscv.vlsseg4.mask.nxv4i16.i64(..., i64 0) ...]
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 8 x i16> and @llvm.riscv.vlsseg4.mask.nxv8i16.i64(..., i64 0) ...]
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 1 x half> and @llvm.riscv.vlsseg4.mask.nxv1f16.i64(..., i64 1) ...]
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 2 x half> and @llvm.riscv.vlsseg4.mask.nxv2f16.i64(..., i64 1) ...]
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 4 x half> and @llvm.riscv.vlsseg4.mask.nxv4f16.i64(..., i64 1) ...]
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 8 x half> and @llvm.riscv.vlsseg4.mask.nxv8f16.i64(..., i64 1) ...]
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 1 x i16> and @llvm.riscv.vlsseg4.mask.nxv1i16.i64(..., i64 1) ...]
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 2 x i16> and @llvm.riscv.vlsseg4.mask.nxv2i16.i64(..., i64 1) ...]
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 4 x i16> and @llvm.riscv.vlsseg4.mask.nxv4i16.i64(..., i64 1) ...]
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 8 x i16> and @llvm.riscv.vlsseg4.mask.nxv8i16.i64(..., i64 1) ...]
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 1 x i16> and @llvm.riscv.vlsseg4.mask.nxv1i16.i64(..., i64 1) ...]
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 2 x i16> and @llvm.riscv.vlsseg4.mask.nxv2i16.i64(..., i64 1) ...]
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 4 x i16> and @llvm.riscv.vlsseg4.mask.nxv4i16.i64(..., i64 1) ...]
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[... +// CHECK-RV64 block: same pattern as above with <vscale x 8 x i16> and @llvm.riscv.vlsseg4.mask.nxv8i16.i64(..., i64 1) ...]
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
bstride, size_t vl) { + return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c index 137a9d00cbb1..8830f98dafc4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e32.c @@ -7,651 +7,651 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} 
- -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_tu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr 
[[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_tu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret 
void -// -void test_vlsseg4e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_tu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], 
ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_tum(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_tum(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_tum(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 
noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], 
ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_tumu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: 
ret void -// -void test_vlsseg4e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_tumu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg4e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_tumu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_f32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// 
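
For readers skimming this hunk: every removed test follows the same shape, returning the four segment fields through separate output pointers and taking one maskedoff operand per field. A minimal caller-side sketch of that removed interface (the wrapper name and registers are hypothetical; the intrinsic call shape is copied from the tests being deleted here):

    #include <riscv_vector.h>
    #include <stddef.h>

    /* Old (removed) shape: four out-pointers plus four per-field maskedoff operands. */
    void demo_old_mu(const float *in, ptrdiff_t bstride, size_t vl, vbool64_t mask,
                     vfloat32mf2_t off0, vfloat32mf2_t off1,
                     vfloat32mf2_t off2, vfloat32mf2_t off3) {
      vfloat32mf2_t v0, v1, v2, v3;
      __riscv_vlsseg4e32_mu(&v0, &v1, &v2, &v3, mask,
                            off0, off1, off2, off3, in, bstride, vl);
      /* v0..v3 now hold the four fields of each strided segment. */
    }
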
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_f32m2_mu(vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t 
maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_i32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_i32m2_mu(vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e32_v_u32m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e32_v_u32m2_mu(vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t 
test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg4e32_v_i32mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.nxv1i32.i64(<vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP8]]
+//
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg4e32_v_i32m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.nxv2i32.i64(<vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP8]]
+//
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @test_vlsseg4e32_v_i32m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:
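
Against the removed interface above, the new _tu tests show the tuple shape: one tuple passthru in, one tuple result out, no output pointers. A minimal usage sketch (the wrapper name is hypothetical, the call shape mirrors the tests in this hunk, and the vget accessor is assumed from the tuple-type patches earlier in this series):

    #include <riscv_vector.h>
    #include <stddef.h>

    vfloat32mf2_t demo_new_tu(vfloat32mf2x4_t maskedoff_tuple,
                              const float *in, ptrdiff_t bstride, size_t vl) {
      /* One call, one tuple result; tail lanes come from maskedoff_tuple. */
      vfloat32mf2x4_t seg = __riscv_vlsseg4e32_tu(maskedoff_tuple, in, bstride, vl);
      /* Pull out field 1; assumed overloaded tuple vget from the same series. */
      return __riscv_vget_f32mf2(seg, 1);
    }
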
[[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i32.i64( [[TMP4]], 
[[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] 
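
One detail worth calling out in the masked CHECK lines across this hunk: the trailing immediate on @llvm.riscv.vlsseg4.mask.* encodes the policy. The _tum tests pass i64 2, _tumu passes i64 0, and _mu passes i64 1, which matches the backend convention that bit 0 means tail-agnostic and bit 1 means mask-agnostic; the unmasked _tu tests instead lower to the non-masked intrinsic, whose passthru operands already pin the tail. As named constants (names are illustrative, values are the ones visible in the CHECK lines):

    /* Values as observed in the CHECK lines of this patch; names illustrative. */
    enum rvv_policy {
      POLICY_TUMU = 0, /* tail undisturbed, mask undisturbed */
      POLICY_MU   = 1, /* tail agnostic,   mask undisturbed  */
      POLICY_TUM  = 2  /* tail undisturbed, mask agnostic    */
    };
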
= extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t 
test_vlsseg4e32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
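
The masked tuple forms keep the familiar operand order, mask first and then the passthru tuple, as the _tumu tests here show. A minimal sketch under the same assumptions as the earlier ones (wrapper name hypothetical, call shape copied from these tests):

    #include <riscv_vector.h>
    #include <stddef.h>

    vint32m1x4_t demo_new_tumu(vbool32_t mask, vint32m1x4_t passthru,
                               const int32_t *in, ptrdiff_t bstride, size_t vl) {
      /* Inactive and tail lanes both come from passthru (policy 0 above). */
      return __riscv_vlsseg4e32_tumu(mask, passthru, in, bstride, vl);
    }
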
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t 
maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } 
[[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32mf2x4_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m1x4_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_f32m2x4_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32mf2x4_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m1x4_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_i32m2x4_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32mf2x4_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m1x4_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e32_v_u32m2x4_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
+//
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c
index fa4116dfd4e4..5cb0703f920c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e64.c
@@ -7,435 +7,435 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_f64m2_tu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_i64m2_tu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_u64m2_tu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_f64m2_tum(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_i64m2_tum(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_u64m2_tum(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_f64m2_tumu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_i64m2_tumu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , } [[TMP8]]
 //
-void test_vlsseg4e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT:
[[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_tumu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t 
maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_f64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_f64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_f64m2_mu(vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m1_mu 
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_i64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_i64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_i64m2_mu(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m1x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e64_v_u64m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e64_v_u64m2x4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] // -void test_vlsseg4e64_v_u64m2_mu(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c index 4f8a663a14f9..8ff2e40cca0e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg4e8.c @@ -1,728 +1,729 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, 
vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] 
= extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tu 
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tu(v0, v1, v2, v3, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr 
[[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tum(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg4e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tum(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tum(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, 
const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_tumu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg4e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8m2_tumu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_tumu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf4_mu -// 
CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_i8m2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_i8m2_mu(vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg4e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg4e8_v_u8m2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0]], <vscale x 16 x i8> [[MASKEDOFF1]], <vscale x 16 x i8> [[MASKEDOFF2]], <vscale x 16 x i8> [[MASKEDOFF3]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg4e8_v_u8m2_mu(vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_i8mf8x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_i8mf4x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_i8mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_i8m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_i8m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_u8mf8x4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_u8mf4x4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_u8mf2x4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_u8m1x4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_i8mf8x4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]],
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: 
ret { , , , } [[TMP8]] +// +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf8x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf4x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8mf2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m1x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_u8m2x4_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf8x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , } @test_vlsseg4e8_v_i8mf4x4_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , } [[TMP8]] +// +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const 
int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_i8mf2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_i8m1x4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_i8m2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_u8mf8x4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_u8mf4x4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_u8mf2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_u8m1x4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_i8mf8x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_i8mf4x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_i8mf2x4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_i8m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_i8m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg4e8_v_u8mf8x4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP8]]
+//
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg4e8_v_u8mf4x4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP8]]
+//
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg4e8_v_u8mf2x4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP8]]
+//
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg4e8_v_u8m1x4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP8]]
+//
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @test_vlsseg4e8_v_u8m2x4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], <vscale x 16 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], <vscale x 16 x i8> [[TMP6]], <vscale x 16 x i8> [[TMP7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP8]]
+//
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c
index a23b26649137..174241c2a006 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e16.c
@@ -7,723 +7,723 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg5.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], <vscale x 2 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg5.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], <vscale x 4 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg5.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg5.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg5.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0]], <vscale x 2 x half> [[MASKEDOFF1]], <vscale x 2 x half> [[MASKEDOFF2]], <vscale x 2 x half> [[MASKEDOFF3]], <vscale x 2 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg5.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0]], <vscale x 4 x half> [[MASKEDOFF1]], <vscale x 4 x half> [[MASKEDOFF2]], <vscale x 4 x half> [[MASKEDOFF3]], <vscale x 4 x half> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg5.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0]], <vscale x 4 x i16> [[MASKEDOFF1]], <vscale x 4 x i16> [[MASKEDOFF2]], <vscale x 4 x i16> [[MASKEDOFF3]], <vscale x 4 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0]], <vscale x 1 x i16> [[MASKEDOFF1]], <vscale x 1 x i16> [[MASKEDOFF2]], <vscale x 1 x i16> [[MASKEDOFF3]], <vscale x 1 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg5.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0]], <vscale x 2 x i16> [[MASKEDOFF1]], <vscale x 2 x i16> [[MASKEDOFF2]], <vscale x 2 x i16> [[MASKEDOFF3]], <vscale x 2 x i16> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
[[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_f16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_i16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf4_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg5e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg5e16_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
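The hunks below replace the pointer-based interface deleted above with the tuple-based one. At a call site the difference looks roughly like this (a minimal sketch, not from the test file; it assumes mask, maskedoff0..maskedoff4, maskedoff_tuple, base, bstride and vl are declared as in the tests):

    // Old interface (deleted above): five maskedoff operands in, five
    // results written back through five output pointers.
    vuint16m1_t v0, v1, v2, v3, v4;
    __riscv_vlsseg5e16_tum(&v0, &v1, &v2, &v3, &v4, mask,
                           maskedoff0, maskedoff1, maskedoff2,
                           maskedoff3, maskedoff4, base, bstride, vl);

    // New interface (added below): a single vuint16m1x5_t tuple carries
    // the tail/mask-undisturbed source in and the five segment fields out.
    vuint16m1x5_t v = __riscv_vlsseg5e16_tum(mask, maskedoff_tuple,
                                             base, bstride, vl);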
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
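As the MASKEDOFF_TUPLE_COERCE arguments in the CHECK lines above show, the tuple is not passed as one aggregate: clang coerces it into five scalable vectors, stitches them back together with insertvalue, and splits them again with extractvalue around the intrinsic call. On the C side the tuple stays opaque; reading one field back out goes through the tuple accessors (a usage sketch; it assumes the __riscv_vget tuple accessors introduced by the same intrinsics work are available in the header):

    vuint16m1x5_t v = __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
    vuint16m1_t field0 = __riscv_vget_v_u16m1x5_u16m1(v, 0);  // first segment field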
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
+//
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+//
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , 
, , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], 
[[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_f16m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , 
, , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_i16m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } 
[[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e16_v_u16m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c index 75c4022b3992..6b8544739405 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e32.c @@ -7,483 +7,483 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } 
@test_vlsseg5e32_v_f32mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32mf2x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_f32m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_f32m1x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32mf2x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_i32m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_i32m1x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32mf2x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e32_v_u32m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e32_v_u32m1x5_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e32_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
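(Illustrative caller-side sketch, not part of the patch: the change above replaces the five output pointers and five separate maskedoff operands with a single tuple value that is both passed in and returned. The __riscv_vget_v_f32mf2x5_f32mf2 accessor used below is assumed from the tuple-type support introduced earlier in this patch series.)

#include <riscv_vector.h>
#include <stddef.h>

// Masked, tail-undisturbed strided segment load of five fields; with the new
// interface the whole segment travels as one vfloat32mf2x5_t tuple instead of
// five out-pointers.
static vfloat32mf2_t first_field(vbool64_t mask, vfloat32mf2x5_t maskedoff,
                                 const float *base, ptrdiff_t bstride,
                                 size_t vl) {
  vfloat32mf2x5_t seg = __riscv_vlsseg5e32_tum(mask, maskedoff, base, bstride, vl);
  return __riscv_vget_v_f32mf2x5_f32mf2(seg, 0);  // extract field 0 (assumed accessor)
}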
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c
index 5c97572801a0..96a7c7e9607f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e64.c
@@ -7,243 +7,243 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e64_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e64_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]]
 //
-void test_vlsseg5e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
  return __riscv_vlsseg5e64_tu(maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store
[[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], 
align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_f64m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_i64m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] // -void test_vlsseg5e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e64_v_u64m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg5.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP10]]
//
-void test_vlsseg5e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e64_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e64_mu(mask, maskedoff_tuple, base, bstride, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c
index fa21ff9aa0b9..fd1f0d20e7ba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg5e8.c
@@ -1,648 +1,649 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg5.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], <vscale x 2 x i8> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg5.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], <vscale x 4 x i8> [[MASKEDOFF3]], <vscale x 4 x i8> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-//
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(v0, v1, v2, v3, v4, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], 
align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg5e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], 
ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg5e8_v_u8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg5.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg5e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_mu(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg5e8_v_i8mf8x5_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.nxv1i8.i64(<vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP10]]
+//
+vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg5e8_v_i8mf4x5_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg5.nxv2i8.i64(<vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP10]]
+//
+vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg5e8_v_i8mf2x5_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>
} [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { 
, , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], 
[[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], 
i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] 
+// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t 
mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf8x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_i8mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } 
@test_vlsseg5e8_v_i8m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf8x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf4x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8mf2x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , } [[TMP10]] +// +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , } @test_vlsseg5e8_v_u8m1x5_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
<vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 0
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 3
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], 4
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg5.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP10]]
+//
+vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c
index 0bb9934ae8d1..9118fe6e53bc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e16.c
@@ -7,795 +7,795 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg6.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], <vscale x 1 x half> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void
test_vlsseg6e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg6e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg6e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, 
vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: 
define dso_local void @test_vlsseg6e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], 
ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, 
vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const 
_Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr 
[[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void 
@test_vlsseg6e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e16_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]]
+//
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { ,
, , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], 
[[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef 
[[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t 
bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , 
, } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , 
, , , } @test_vlsseg6e16_v_u16mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_f16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_i16m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e16_v_u16mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg6.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP12]]
+//
+vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg6e16_v_u16m1x6_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP12]]
+//
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
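The checks above complete the four policy variants (_tu, _tum, _tumu, _mu) for vlsseg6e16. In the masked variants the trailing immediate on the intrinsic call is the policy operand; reading it off the checks, bit 0 appears to mean tail agnostic and bit 1 mask agnostic, so _tum carries i64 2, _tumu carries i64 0 and _mu carries i64 1, while the unmasked _tu form takes no policy operand. A minimal caller-side sketch of the tuple interface these tests pin down, assuming the __riscv_vget tuple accessor from the RVV intrinsics spec (the helper name and body are illustrative, not part of the patch):

    #include <riscv_vector.h>

    /* Tail-undisturbed strided segment load: the six merge values now travel
       as one tuple argument and the six results come back as one tuple. */
    vint16m1_t first_field_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base,
                              ptrdiff_t bstride, size_t vl) {
      vint16m1x6_t v = __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl);
      /* Fields are pulled out of the tuple with vget instead of being
         written through six separate output pointers. */
      return __riscv_vget_v_i16m1x6_i16m1(v, 0);
    }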
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c
index 23deb0ae3d2e..39caaae3cbda 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e32.c
@@ -7,531 +7,531 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg6e32_v_f32mf2x6_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg6.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], <vscale x 1 x float> [[MASKEDOFF4]], <vscale x 1 x float> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg6.nxv1f32.i64(<vscale x 1 x float> [[TMP6]], <vscale x 1 x float> [[TMP7]], <vscale x 1 x float> [[TMP8]], <vscale x 1 x float> [[TMP9]], <vscale x 1 x float> [[TMP10]], <vscale x 1 x float> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP12]]
 //
-void test_vlsseg6e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl);
 }
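This first hunk is the pattern for the whole file: the old overloaded _tu intrinsic returned void and wrote the six segment fields through the v0..v5 output pointers (hence the extractvalue/store pairs in the removed checks), whereas the tuple form takes a single merge tuple and returns the loaded tuple, leaving only insertvalue/extractvalue glue around the same llvm.riscv.vlsseg6 call. A hedged before/after sketch of the caller (the commented-out call mirrors the removed signature; names are illustrative):

    #include <riscv_vector.h>

    vfloat32mf2x6_t load6_tu(vfloat32mf2x6_t maskedoff_tuple, const float *base,
                             ptrdiff_t bstride, size_t vl) {
      /* Before this patch the same load needed six output pointers plus six
         separate maskedoff operands:
           __riscv_vlsseg6e32_tu(&v0, &v1, &v2, &v3, &v4, &v5,
                                 m0, m1, m2, m3, m4, m5, base, bstride, vl); */
      return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl);
    }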
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg6e32_v_f32m1x6_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg6.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], <vscale x 2 x float> [[MASKEDOFF2]], <vscale x 2 x float> [[MASKEDOFF3]], <vscale x 2 x float> [[MASKEDOFF4]], <vscale x 2 x float> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP4]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg6.nxv2f32.i64(<vscale x 2 x float> [[TMP6]], <vscale x 2 x float> [[TMP7]], <vscale x 2 x float> [[TMP8]], <vscale x 2 x float> [[TMP9]], <vscale x 2 x float> [[TMP10]], <vscale x 2 x float> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP12]]
 //
-void test_vlsseg6e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base,
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , 
, , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } 
[[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef 
[[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , 
, , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { 
, , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef 
[[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t 
test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void 
test_vlsseg6e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_f32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_i32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e32_v_u32m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, 
ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c
index 156f505d0752..d5590cf76234 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e64.c
@@ -7,267 +7,267 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg6e64_v_f64m1x6_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], <vscale x 1 x double> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.nxv1f64.i64(<vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP12]]
 //
-void test_vlsseg6e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg6e64_tu(maskedoff_tuple, base, bstride, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_i64m1x6_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+//
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_f64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_i64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], 
ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] // -void test_vlsseg6e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e64_v_u64m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
 //
-void test_vlsseg6e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg6e64_v_f64m1x6_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], <vscale x 1 x double> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP12]]
 //
-void test_vlsseg6e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_i64m1x6_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
 //
-void test_vlsseg6e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_u64m1x6_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
 //
-void test_vlsseg6e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_f64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg6e64_v_f64m1x6_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], <vscale x 1 x double> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[TMP6]], <vscale x 1 x double> [[TMP7]], <vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP12]]
 //
-void test_vlsseg6e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_i64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_i64m1x6_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
 //
-void test_vlsseg6e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e64_v_u64m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg6e64_v_u64m1x6_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP12]]
 //
-void test_vlsseg6e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c
index dbf7a410a7fb..35566447cfef 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg6e8.c
@@ -1,712 +1,713 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], <vscale x 2 x i8> [[MASKEDOFF4]], <vscale x 2 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf4_tu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], <vscale x 4 x i8> [[MASKEDOFF3]], <vscale x 4 x i8> [[MASKEDOFF4]], <vscale x 4 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], <vscale x 2 x i8> [[MASKEDOFF4]], <vscale x 2 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], <vscale x 4 x i8> [[MASKEDOFF3]], <vscale x 4 x i8> [[MASKEDOFF4]], <vscale x 4 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(v0, v1, v2, v3, v4, v5, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], <vscale x 2 x i8> [[MASKEDOFF4]], <vscale x 2 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], <vscale x 4 x i8> [[MASKEDOFF3]], <vscale x 4 x i8> [[MASKEDOFF4]], <vscale x 4 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], <vscale x 2 x i8> [[MASKEDOFF4]], <vscale x 2 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], <vscale x 4 x i8> [[MASKEDOFF3]], <vscale x 4 x i8> [[MASKEDOFF4]], <vscale x 4 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], <vscale x 2 x i8> [[MASKEDOFF4]], <vscale x 2 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], <vscale x 4 x i8> [[MASKEDOFF3]], <vscale x 4 x i8> [[MASKEDOFF4]], <vscale x 4 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], <vscale x 2 x i8> [[MASKEDOFF4]], <vscale x 2 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg6e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], <vscale x 4 x i8> [[MASKEDOFF3]], <vscale x 4 x i8> [[MASKEDOFF4]], <vscale x 4 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] =
extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_tumu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr 
[[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, 
vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg6e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg6e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], 
<vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg6e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_mu(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg6e8_v_i8mf8x6_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.nxv1i8.i64(<vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP12]]
+//
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg6e8_v_i8mf4x6_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg6.nxv2i8.i64(<vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP12]]
+//
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg6e8_v_i8mf2x6_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg6.nxv4i8.i64(<vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP12]]
+//
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t
*base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , 
, } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], 
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// 
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue 
{ , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf8x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_i8m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf8x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf4x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8mf2x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , } [[TMP12]] +// +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , } @test_vlsseg6e8_v_u8m1x6_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP12]]
+//
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c
index 194aed96b182..c2e9b0675208 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e16.c
@@ -7,867 +7,867 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg7.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0]], <vscale x 1 x half> [[MASKEDOFF1]], <vscale x 1 x half> [[MASKEDOFF2]], <vscale x 1 x half> [[MASKEDOFF3]], <vscale x 1 x half> [[MASKEDOFF4]], <vscale x 1 x half> [[MASKEDOFF5]], <vscale x 1 x half> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg7e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3,
maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr 
noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], 
ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr 
noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], 
ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void 
test_vlsseg7e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], 
[[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, 
vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], 
[[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t 
*v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], 
[[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t 
*v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 
[[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, 
vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], 
i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t 
maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t 
maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e16_v_u16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t 
maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg7e16_v_f16mf4x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg7.nxv1f16.i64(<vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP14]]
+//
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg7e16_v_f16mf2x7_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg7.nxv2f16.i64(<vscale x 2 x half> [[TMP7]], <vscale x 2 x half> [[TMP8]], <vscale x 2 x half> [[TMP9]], <vscale x 2 x half> [[TMP10]], <vscale x 2 x half> [[TMP11]], <vscale x 2 x half> [[TMP12]], <vscale x 2 x half> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP14]]
+//
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg7e16_v_f16m1x7_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP5]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg7.nxv4f16.i64(<vscale x 4 x half> [[TMP7]], <vscale x 4 x half> [[TMP8]], <vscale x 4 x half> [[TMP9]], <vscale x 4 x half> [[TMP10]], <vscale x 4 x half> [[TMP11]], <vscale x 4 x half> [[TMP12]], <vscale x 4 x half> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP14]]
+//
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg7e16_v_i16mf4x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP5]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg7.nxv1i16.i64(<vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16> [[TMP9]], <vscale x 1 x i16> [[TMP10]], <vscale x 1 x i16> [[TMP11]], <vscale x 1 x i16> [[TMP12]], <vscale x 1 x i16> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP14]]
+//
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg7e16_v_i16mf2x7_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , ,
, , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { 
, , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t 
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tum
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf4x7_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16mf2x7_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_f16m1x7_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf4x7_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]]
+//
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16mf2x7_mu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , 
, , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_i16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf4x7_mu +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16mf2x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e16_v_u16m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c index 83f9b819ce4b..e6c5dd788994 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e32.c @@ -7,579 
+7,579 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg7e32_v_f32mf2x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg7.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], <vscale x 1 x float> [[MASKEDOFF4]], <vscale x 1 x float> [[MASKEDOFF5]], <vscale x 1 x float> [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } 
@llvm.riscv.vlsseg7.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], 
[[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, 
ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// 
CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_u32m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } 
[[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_f32m1x7_tum +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t 
vl) { - return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e32_v_i32m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_i32m1x7_tum
[... CHECK-RV64 bodies elided: same tuple insertvalue/extractvalue plumbing as above, around @llvm.riscv.vlsseg7.mask.nxv2i32.i64, policy 2 ...]
 //
-void test_vlsseg7e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_u32mf2x7_tum
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv1i32.i64, policy 2 ...]
 //
-void test_vlsseg7e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tum
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_u32m1x7_tum
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv2i32.i64, policy 2 ...]
 //
-void test_vlsseg7e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg7e32_v_f32mf2x7_tumu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv1f32.i64, policy 0 ...]
 //
-void test_vlsseg7e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg7e32_v_f32m1x7_tumu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv2f32.i64, policy 0 ...]
 //
-void test_vlsseg7e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_i32mf2x7_tumu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv1i32.i64, policy 0 ...]
 //
-void test_vlsseg7e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_i32m1x7_tumu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv2i32.i64, policy 0 ...]
 //
-void test_vlsseg7e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_u32mf2x7_tumu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv1i32.i64, policy 0 ...]
 //
-void test_vlsseg7e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_tumu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_u32m1x7_tumu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv2i32.i64, policy 0 ...]
 //
-void test_vlsseg7e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg7e32_v_f32mf2x7_mu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv1f32.i64, policy 1 ...]
 //
-void test_vlsseg7e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_f32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg7e32_v_f32m1x7_mu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv2f32.i64, policy 1 ...]
 //
-void test_vlsseg7e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_i32mf2x7_mu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv1i32.i64, policy 1 ...]
 //
-void test_vlsseg7e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_i32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_i32m1x7_mu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv2i32.i64, policy 1 ...]
 //
-void test_vlsseg7e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32mf2_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg7e32_v_u32mf2x7_mu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv1i32.i64, policy 1 ...]
 //
-void test_vlsseg7e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e32_v_u32m1_mu
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg7e32_v_u32m1x7_mu
[... CHECK-RV64 bodies elided: tuple plumbing around @llvm.riscv.vlsseg7.mask.nxv2i32.i64, policy 1 ...]
 //
extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c index 160edc5c5160..175e639a35f0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e64.c @@ -7,291 +7,291 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], 
[[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t 
maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tum(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr 
[[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } 
@llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_tumu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_f64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e64_v_i64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local { , , , , , , } @test_vlsseg7e64_v_u64m1x7_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] // -void test_vlsseg7e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, 
vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e64_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c index a14b5a59b90f..7125252d0d49 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg7e8.c @@ -1,776 +1,777 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_i8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, 
-  return __riscv_vlsseg7e8_tu(v0, v1, v2, v3, v4, v5, v6, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
-}
-
[Elided: the hunk continues deleting the remaining old-interface tests, from test_vlsseg7e8_v_i8mf4_tu through test_vlsseg7e8_v_u8mf4_mu, in the same shape for every i8/u8 element type at mf8/mf4/mf2/m1: the _tu tests call @llvm.riscv.vlsseg7.nxv{1,2,4,8}i8.i64, while the masked _tum, _tumu, and _mu tests call @llvm.riscv.vlsseg7.mask.nxv{1,2,4,8}i8.i64 with trailing policy operands i64 2, i64 0, and i64 1 respectively; each deleted test stores the seven loaded segments through the v0-v6 output pointers and returns void.]
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]],
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg7e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg7e8_v_u8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg7e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_mu(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg7e8_v_i8mf8x7_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.nxv1i8.i64(<vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg7e8_v_i8mf4x7_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+//
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 
[[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg7.nxv8i8.i64(<vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP14]]
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg7e8_v_i8mf8x7_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg7e8_v_i8mf4x7_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:
[[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret 
{ , , , , , , } [[TMP14]] +// +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf8x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , 
, } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf4x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8mf2x7_tum +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_u8m1x7_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP14]]
+//
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg7e8_v_i8mf8x7_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP14]]
+//
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg7e8_v_i8mf4x7_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , } [[TMP14]] +// +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , } @test_vlsseg7e8_v_i8mf2x7_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = 
extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP14]]
+//
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
[Autogenerated CHECK-RV64 blocks elided for the remaining _tumu tests: each builds the seven-field merge tuple with insertvalue, splits it with extractvalue, and calls @llvm.riscv.vlsseg7.mask.nxv{1,2,4,8}i8.i64 with trailing policy operand 0 (tail and mask undisturbed).]
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
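For orientation, a minimal sketch of what the _tumu tuple interface looks like from user code, mirroring the tests above; the names load_channels_tumu, prev, and src are illustrative only, not from the patch:

    #include <riscv_vector.h>

    // Strided load of seven interleaved byte fields. One call now yields all
    // seven fields as a single vint8m1x7_t tuple value; lanes that are masked
    // off or past vl keep their values from `prev` (_tumu = tail undisturbed,
    // mask undisturbed).
    vint8m1x7_t load_channels_tumu(vbool8_t mask, vint8m1x7_t prev,
                                   const int8_t *src, ptrdiff_t stride,
                                   size_t vl) {
      return __riscv_vlsseg7e8_tumu(mask, prev, src, stride, vl);
    }

Before this change the same operation took seven separate out-parameters and seven separate maskedoff operands.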
+
[Autogenerated CHECK-RV64 blocks elided for the _mu tests below: identical in shape to the _tumu blocks above, except that the trailing policy operand of @llvm.riscv.vlsseg7.mask.nxv{1,2,4,8}i8.i64 is 1 (mask undisturbed only).]
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
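The removed hunk below shows the cost of the old interface: every test threaded eight output pointers plus eight maskedoff vectors through each call. A hedged sketch of the tuple-type replacement follows; the _tu signature is inferred by analogy with the vlsseg7e8 tests above (the added lines of this hunk are not shown in this excerpt), and load8_tu and merge are illustrative names:

    #include <riscv_vector.h>

    // Tail-undisturbed strided segment load of eight f16 fields: one merge
    // tuple in, one result tuple out, replacing eight vfloat16mf4_t*
    // out-parameters and eight separate maskedoff operands.
    vfloat16mf4x8_t load8_tu(vfloat16mf4x8_t merge, const _Float16 *base,
                             ptrdiff_t bstride, size_t vl) {
      return __riscv_vlsseg8e16_tu(merge, base, bstride, vl);
    }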
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c
index 5c4485f2e002..678d81a1ef7a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e16.c
@@ -7,939 +7,939 @@
 #include <riscv_vector.h>
 
[Autogenerated CHECK-RV64 blocks elided for the removed tests below: each called @llvm.riscv.vlsseg8.nxv{1,2,4}{f,i}16.i64 (or the .mask. variant with trailing policy operand 2 for _tum) with eight separate maskedoff operands and stored the eight extractvalue results through the v0-v7 out-parameters.]
-void test_vlsseg8e16_v_f16mf4_tu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_f16mf2_tu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_f16m1_tu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_i16mf4_tu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_i16mf2_tu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_i16m1_tu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_u16mf4_tu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_u16mf2_tu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_u16m1_tu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_f16mf4_tum(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_f16mf2_tum(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_f16m1_tum(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_i16mf4_tum(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-void test_vlsseg8e16_v_i16mf2_tum(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
[[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_tum(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], 
align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_tum(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_tum(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr 
noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_tum(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_tumu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_tumu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, 
vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_tumu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_tumu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_tumu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_tumu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, 
vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf4_tumu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16mf2_tumu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_u16m1_tumu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf4_mu(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t 
maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16mf2_mu(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_f16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_f16m1_mu(vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf4_mu(vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16mf2_mu(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_i16m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e16_v_i16m1_mu(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e16_v_u16mf4_mu(vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16mf2_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e16_v_u16mf2_mu(vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e16_v_u16m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 2
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg8e16_v_u16m1_mu(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e16_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf4x8_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
+//
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16mf2x8_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
+//
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_f16m1x8_tu
+// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]]
+//
+vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_i16mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } 
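As a usage sketch of the tuple-returning _tu form exercised above: the load deinterleaves eight fields whose segments start every bstride bytes, and individual fields come out of the tuple with the vget accessors. The __riscv_vget_v_f16mf4x8_f16mf4 spelling below follows the tuple-type naming scheme but is an assumption of this sketch, not something this patch adds.

#include <riscv_vector.h>

// Load 8 half fields per segment, tail-undisturbed, then peel off field 0.
// Elements past vl keep their values from `tail`.
static vfloat16mf4_t load_field0(vfloat16mf4x8_t tail, const _Float16 *base,
                                 ptrdiff_t bstride, size_t vl) {
  vfloat16mf4x8_t seg = __riscv_vlsseg8e16_tu(tail, base, bstride, vl);
  return __riscv_vget_v_f16mf4x8_f16mf4(seg, 0); // accessor name assumed
}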
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg8e16_v_i16mf2x8_tu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.nxv2i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+//
+vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg8e16_v_i16m1x8_tu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.nxv4i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+//
+vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg8e16_v_u16mf4x8_tu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.nxv1i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+//
+vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg8e16_v_u16mf2x8_tu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.nxv2i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+//
+vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
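One point worth keeping in mind when reading the bstride operand in these tests: it is a byte stride between the starts of consecutive segments, not an element count. For an array-of-structs layout the natural stride is simply sizeof the record; a sketch with an illustrative layout:

#include <stddef.h>
#include <stdint.h>

typedef struct {
  int16_t field[8];   // the eight fields a vlsseg8e16 load picks up
  int16_t padding[4]; // skipped by the strided access
} record_t;

// Consecutive segments start sizeof(record_t) == 24 bytes apart.
static const ptrdiff_t kBstride = (ptrdiff_t)sizeof(record_t);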
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg8e16_v_u16m1x8_tu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.nxv4i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+//
+vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg8e16_v_f16mf4x8_tum
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv1f16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+//
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg8e16_v_f16mf2x8_tum
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv2f16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+//
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg8e16_v_f16m1x8_tum
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv4f16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+//
+vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
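The _tum variants beginning above add a mask in front of the merge tuple: active elements are loaded, while masked-off elements and the tail keep the values of maskedoff_tuple. A minimal sketch, with mask construction left to the caller:

#include <riscv_vector.h>

static vfloat16m1x8_t gather_active(vbool16_t mask, vfloat16m1x8_t prev,
                                    const _Float16 *base, ptrdiff_t bstride,
                                    size_t vl) {
  // Where `mask` is clear, and for elements past vl, the result keeps
  // the corresponding element of `prev`.
  return __riscv_vlsseg8e16_tum(mask, prev, base, bstride, vl);
}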
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg8e16_v_i16mf4x8_tum
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv1i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+//
+vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg8e16_v_i16mf2x8_tum
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv2i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+//
+vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg8e16_v_i16m1x8_tum
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv4i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+//
+vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg8e16_v_u16mf4x8_tum
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv1i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+//
+vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg8e16_v_u16mf2x8_tum
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv2i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+//
+vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
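The trailing i64 immediate on the masked intrinsic calls encodes the tail/mask policy: bit 0 set means tail-agnostic and bit 1 set means mask-agnostic, which is why the _tum tests lower with i64 2 (tail undisturbed, mask agnostic) and the _tumu tests below lower with i64 0 (both undisturbed). A sketch of that encoding; the enumerator names here are illustrative, not the LLVM identifiers:

enum vta_vma_policy {
  POLICY_TU_MU = 0, /* tail and mask undisturbed -> _tumu lowers with i64 0 */
  POLICY_TA    = 1, /* bit 0: tail agnostic */
  POLICY_MA    = 2, /* bit 1: mask agnostic -> _tum lowers with i64 2 */
  POLICY_TA_MA = 3, /* fully agnostic */
};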
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg8e16_v_u16m1x8_tum
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv4i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+//
+vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg8e16_v_f16mf4x8_tumu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv1f16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+//
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg8e16_v_f16mf2x8_tumu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv2f16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+//
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg8e16_v_f16m1x8_tumu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv4f16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+//
+vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
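A strip-mined loop is the usual shape around the _tumu form; the sketch below assumes the standard __riscv_vsetvl_e16m1 helper from the RVV intrinsics (not part of this diff) and advances the base pointer by whole segments of bstride bytes:

#include <riscv_vector.h>

static vfloat16m1x8_t sweep(vbool16_t mask, vfloat16m1x8_t acc,
                            const _Float16 *base, ptrdiff_t bstride,
                            size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    const _Float16 *p =
        (const _Float16 *)((const char *)base + (ptrdiff_t)i * bstride);
    // Masked-off elements and the tail keep their previous values in `acc`.
    acc = __riscv_vlsseg8e16_tumu(mask, acc, p, bstride, vl);
    i += vl;
  }
  return acc;
}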
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg8e16_v_i16mf4x8_tumu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv1i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+//
+vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg8e16_v_i16mf2x8_tumu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv2i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+//
+vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg8e16_v_i16m1x8_tumu
+// CHECK-RV64:       call {{.*}} @llvm.riscv.vlsseg8.mask.nxv4i16.i64({{.*}}, ptr [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+//
+vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
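Two patterns in the CHECK lines are worth calling out. First, the signed and unsigned tests lower to the same @llvm.riscv.vlsseg8 intrinsics; signedness exists only at the C type level. Second, only mf4, mf2 and m1 variants appear for seg8, since the V specification caps EMUL times NFIELDS at 8 vector registers, which pins the largest legal LMUL for eight fields at 1. A sketch of the first point:

#include <riscv_vector.h>

// Both calls compile to @llvm.riscv.vlsseg8.mask.nxv4i16.i64; only the
// C-level element types differ.
static void same_lowering(vbool16_t mask, vint16m1x8_t sp, vuint16m1x8_t up,
                          const int16_t *sb, const uint16_t *ub,
                          ptrdiff_t bstride, size_t vl) {
  vint16m1x8_t s = __riscv_vlsseg8e16_tumu(mask, sp, sb, bstride, vl);
  vuint16m1x8_t u = __riscv_vlsseg8e16_tumu(mask, up, ub, bstride, vl);
  (void)s;
  (void)u;
}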
[[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e16_v_u16mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], 
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg8.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], <vscale x 2 x i16> [[TMP12]], <vscale x 2 x i16> [[TMP13]], <vscale x 2 x i16> [[TMP14]], <vscale x 2 x i16> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP16]]
+//
+vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg8e16_v_u16m1x8_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], <vscale x 4 x i16> [[TMP14]], <vscale x 4 x i16> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP16]]
+//
+vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @test_vlsseg8e16_v_f16mf4x8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], <vscale x 1 x half> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg8.mask.nxv1f16.i64(<vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], <vscale x 1 x half> [[TMP14]], <vscale x 1 x half> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP16]]
+//
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @test_vlsseg8e16_v_f16mf2x8_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], <vscale x 2 x half> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg8.mask.nxv2f16.i64(<vscale x 2 x half> [[TMP8]], <vscale x 2 x half> [[TMP9]], <vscale x 2 x half> [[TMP10]], <vscale x 2 x half> [[TMP11]], <vscale x 2 x half> [[TMP12]], <vscale x 2 x half> [[TMP13]], <vscale x 2 x half> [[TMP14]], <vscale x 2 x half> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP16]]
+//
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @test_vlsseg8e16_v_f16m1x8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP5]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], <vscale x 4 x half> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg8.mask.nxv4f16.i64(<vscale x 4 x half> [[TMP8]], <vscale x 4 x half> [[TMP9]], <vscale x 4 x half> [[TMP10]], <vscale x 4 x half> [[TMP11]], <vscale x 4 x half> [[TMP12]], <vscale x 4 x half> [[TMP13]], <vscale x 4 x half> [[TMP14]], <vscale x 4 x half> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP16]]
+//
+vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg8e16_v_i16mf4x8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP5]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg8.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16> [[TMP9]], <vscale x 1 x i16> [[TMP10]], <vscale x 1 x i16> [[TMP11]], <vscale x 1 x i16> [[TMP12]], <vscale x 1 x i16> [[TMP13]], <vscale x 1 x i16> [[TMP14]], <vscale x 1 x i16> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP16]]
+//
+vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg8e16_v_i16mf2x8_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg8.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], <vscale x 2 x i16> [[TMP12]], <vscale x 2 x i16> [[TMP13]], <vscale x 2 x i16> [[TMP14]], <vscale x 2 x i16> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP16]]
+//
+vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg8e16_v_i16m1x8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], <vscale x 4 x i16> [[TMP14]], <vscale x 4 x i16> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP16]]
+//
+vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @test_vlsseg8e16_v_u16mf4x8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP5]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP6]], <vscale x 1 x i16> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg8.mask.nxv1i16.i64(<vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16> [[TMP9]], <vscale x 1 x i16> [[TMP10]], <vscale x 1 x i16> [[TMP11]], <vscale x 1 x i16> [[TMP12]], <vscale x 1 x i16> [[TMP13]], <vscale x 1 x i16> [[TMP14]], <vscale x 1 x i16> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP16]]
+//
+vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @test_vlsseg8e16_v_u16mf2x8_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP5]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], <vscale x 2 x i16> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg8.mask.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], <vscale x 2 x i16> [[TMP12]], <vscale x 2 x i16> [[TMP13]], <vscale x 2 x i16> [[TMP14]], <vscale x 2 x i16> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP16]]
+//
+vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @test_vlsseg8e16_v_u16m1x8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], <vscale x 4 x i16> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], <vscale x 4 x i16> [[TMP14]], <vscale x 4 x i16> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP16]]
+//
+vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c
index 066e71ed19a4..1815a46083a2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e32.c
@@ -7,627 +7,627 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg8e32_v_f32mf2x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg8.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], <vscale x 1 x float> [[MASKEDOFF4]], <vscale x 1 x float> [[MASKEDOFF5]], <vscale x 1 x float> [[MASKEDOFF6]], <vscale x 1 x float> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg8.nxv1f32.i64(<vscale x 1 x float> [[TMP8]], <vscale x 1 x float> [[TMP9]], <vscale x 1 x float> [[TMP10]], <vscale x 1 x float> [[TMP11]], <vscale x 1 x float> [[TMP12]], <vscale x 1 x float> [[TMP13]], <vscale x 1 x float> [[TMP14]], <vscale x 1 x float> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP16]]
 //
-void test_vlsseg8e32_v_f32mf2_tu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @test_vlsseg8e32_v_f32m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg8.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0]], <vscale x 2 x float> [[MASKEDOFF1]], <vscale x 2 x float> [[MASKEDOFF2]], <vscale x 2 x float> [[MASKEDOFF3]], <vscale x 2 x float> [[MASKEDOFF4]], <vscale x 2 x float> [[MASKEDOFF5]], <vscale x 2 x float> [[MASKEDOFF6]], <vscale x 2 x float> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP4]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP5]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP6]], <vscale x 2 x float> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg8.nxv2f32.i64(<vscale x 2 x float> [[TMP8]], <vscale x 2 x float> [[TMP9]], <vscale x 2 x float> [[TMP10]], <vscale x 2 x float> [[TMP11]], <vscale x 2 x float> [[TMP12]], <vscale x 2 x float> [[TMP13]], <vscale x 2 x float> [[TMP14]], <vscale x 2 x float> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP16]]
 //
-void test_vlsseg8e32_v_f32m1_tu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg8e32_v_i32mf2x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg8.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], <vscale x 1 x i32> [[MASKEDOFF3]], <vscale x 1 x i32> [[MASKEDOFF4]], <vscale x 1 x i32> [[MASKEDOFF5]], <vscale x 1 x i32> [[MASKEDOFF6]], <vscale x 1 x i32> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP5]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg8.nxv1i32.i64(<vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], <vscale x 1 x i32> [[TMP14]], <vscale x 1 x i32> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP16]]
 //
-void test_vlsseg8e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg8e32_v_i32m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg8.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], <vscale x 2 x i32> [[MASKEDOFF3]], <vscale x 2 x i32> [[MASKEDOFF4]], <vscale x 2 x i32> [[MASKEDOFF5]], <vscale x 2 x i32> [[MASKEDOFF6]], <vscale x 2 x i32> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg8.nxv2i32.i64(<vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], <vscale x 2 x i32> [[TMP14]], <vscale x 2 x i32> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP16]]
 //
-void test_vlsseg8e32_v_i32m1_tu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @test_vlsseg8e32_v_u32mf2x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg8.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0]], <vscale x 1 x i32> [[MASKEDOFF1]], <vscale x 1 x i32> [[MASKEDOFF2]], <vscale x 1 x i32> [[MASKEDOFF3]], <vscale x 1 x i32> [[MASKEDOFF4]], <vscale x 1 x i32> [[MASKEDOFF5]], <vscale x 1 x i32> [[MASKEDOFF6]], <vscale x 1 x i32> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP5]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], <vscale x 1 x i32> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg8.nxv1i32.i64(<vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], <vscale x 1 x i32> [[TMP14]], <vscale x 1 x i32> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP16]]
 //
-void test_vlsseg8e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlsseg8e32_v_u32m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg8.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0]], <vscale x 2 x i32> [[MASKEDOFF1]], <vscale x 2 x i32> [[MASKEDOFF2]], <vscale x 2 x i32> [[MASKEDOFF3]], <vscale x 2 x i32> [[MASKEDOFF4]], <vscale x 2 x i32> [[MASKEDOFF5]], <vscale x 2 x i32> [[MASKEDOFF6]], <vscale x 2 x i32> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], <vscale x 2 x i32> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg8.nxv2i32.i64(<vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], <vscale x 2 x i32> [[TMP14]], <vscale x 2 x i32> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP16]]
 //
-void test_vlsseg8e32_v_u32m1_tu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @test_vlsseg8e32_v_f32mf2x8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg8.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0]], <vscale x 1 x float> [[MASKEDOFF1]], <vscale x 1 x float> [[MASKEDOFF2]], <vscale x 1 x float> [[MASKEDOFF3]], <vscale x 1 x float> [[MASKEDOFF4]], <vscale x 1 x float> [[MASKEDOFF5]], <vscale x 1 x float> [[MASKEDOFF6]], <vscale x 1 x float> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], ptr [[V0]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], ptr [[V1]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], ptr [[V2]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], ptr [[V3]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], ptr [[V4]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], ptr [[V5]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], ptr [[V6]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], ptr [[V7]], align 4
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], <vscale x 1 x float> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 0
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_tum(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_tum(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], 
ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: 
[[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tum(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
[[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_tum(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void 
test_vlsseg8e32_v_u32mf2_tum(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_tum(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_tumu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const 
float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_tumu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return 
__riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_tumu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , 
, , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_tumu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// 
CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32mf2_mu(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_f32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_f32m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_f32m1_mu(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, 
vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32mf2_mu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_i32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_i32m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], 
[[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_i32m1_mu(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32mf2x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } 
[[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32mf2_mu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e32_v_u32m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e32_v_u32m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], ptr [[V2]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 4 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e32_v_u32m1_mu(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c
index 33eb4a2a072a..05a1c3b9df59 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e64.c
@@ -7,315 +7,315 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @test_vlsseg8e64_v_f64m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg8.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0]], <vscale x 1 x double> [[MASKEDOFF1]], <vscale x 1 x double> [[MASKEDOFF2]], <vscale x 1 x double> [[MASKEDOFF3]], <vscale x 1 x double> [[MASKEDOFF4]], <vscale x 1 x double> [[MASKEDOFF5]], <vscale x 1 x double> [[MASKEDOFF6]], <vscale x 1 x double> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT: ret void
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], <vscale x 1 x double> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 7
+// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg8.nxv1f64.i64(<vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], <vscale x 1 x double> [[TMP12]], <vscale x 1 x double> [[TMP13]], <vscale x 1 x double> [[TMP14]], <vscale x 1 x double> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP16]]
 //
-void test_vlsseg8e64_v_f64m1_tu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e64_tu(maskedoff_tuple, base, bstride, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @test_vlsseg8e64_v_i64m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i64> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg8.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0]], <vscale x 1 x i64> [[MASKEDOFF1]], <vscale x 1 x i64> [[MASKEDOFF2]], <vscale x 1 x i64> [[MASKEDOFF3]], <vscale x 1 x i64> [[MASKEDOFF4]], <vscale x 1 x i64> [[MASKEDOFF5]], <vscale x 1 x i64> [[MASKEDOFF6]], <vscale x 1 x i64> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , ,
, , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_tu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_tu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tu(maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_f64m1_tum(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], 
[[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_tum(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, 
vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tum(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret { , , , , , , , } [[TMP16]]
//
-void test_vlsseg8e64_v_u64m1_tum(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_tum(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret { , , , , , , , } [[TMP16]]
//
-void test_vlsseg8e64_v_f64m1_tumu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_tumu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_tumu
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], ptr [[V0]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], ptr [[V1]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], ptr [[V2]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], ptr [[V3]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], ptr [[V4]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], ptr [[V5]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], ptr [[V6]], align 8
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], ptr [[V7]], align 8
-// CHECK-RV64-NEXT:    ret void
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_tumu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_u64m1_tumu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_tumu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_f64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], 
ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_f64m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: 
[[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_f64m1_mu(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const double *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_i64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_i64m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: 
store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void test_vlsseg8e64_v_i64m1_mu(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e64_mu(mask, maskedoff_tuple, base, bstride, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e64_v_u64m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e64_v_u64m1x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 8 -// CHECK-RV64-NEXT: ret void +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] // -void 
test_vlsseg8e64_v_u64m1_mu(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e64_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
+ return __riscv_vlsseg8e64_mu(mask, maskedoff_tuple, base, bstride, vl);
 }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c
index 25d501ded86b..f6b284b16483 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vlsseg8e8.c
@@ -1,840 +1,841 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], <vscale x 1 x i8> [[MASKEDOFF6]], <vscale x 1 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_i8mf8_tu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_tu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_tu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { 
- return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(v0, v1, v2, v3, v4, v5, v6, v7, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_tum(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef 
[[V7:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0]], <vscale x 2 x i8> [[MASKEDOFF1]], <vscale x 2 x i8> [[MASKEDOFF2]], <vscale x 2 x i8> [[MASKEDOFF3]], <vscale x 2 x i8> [[MASKEDOFF4]], <vscale x 2 x i8> [[MASKEDOFF5]], <vscale x 2 x i8> [[MASKEDOFF6]], <vscale x 2 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_i8mf4_tum(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0]], <vscale x 4 x i8> [[MASKEDOFF1]], <vscale x 4 x i8> [[MASKEDOFF2]], <vscale x 4 x i8> [[MASKEDOFF3]], <vscale x 4 x i8> [[MASKEDOFF4]], <vscale x 4 x i8> [[MASKEDOFF5]], <vscale x 4 x i8> [[MASKEDOFF6]], <vscale x 4 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_i8mf2_tum(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], <vscale x 8 x i8> [[MASKEDOFF6]], <vscale x 8 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_i8m1_tum(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0]], <vscale x 1 x i8> [[MASKEDOFF1]], <vscale x 1 x i8> [[MASKEDOFF2]], <vscale x 1 x i8> [[MASKEDOFF3]], <vscale x 1 x i8> [[MASKEDOFF4]], <vscale x 1 x i8> [[MASKEDOFF5]], <vscale x 1 x i8> [[MASKEDOFF6]], <vscale x 1 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_u8mf8_tum(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tum
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tum(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
[[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tum(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tum -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tum(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_tumu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], 
[[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_tumu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], 
ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_tumu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_tumu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef 
[[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_tumu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr 
[[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_tumu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_tumu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
*v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8m1_tumu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8m1_tumu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf8_mu(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], 
ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf4_mu(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8mf2_mu(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const 
int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_i8m1_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_i8m1_mu(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf8_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], 
[[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf8_mu(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf4_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf4_mu(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vlsseg8e8_v_u8mf2_mu -// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], [[MASK:%.*]], [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0]], [[MASKEDOFF1]], [[MASKEDOFF2]], [[MASKEDOFF3]], [[MASKEDOFF4]], [[MASKEDOFF5]], [[MASKEDOFF6]], [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 1) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], ptr [[V0]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], ptr [[V1]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], ptr [[V2]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], ptr [[V3]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], ptr [[V4]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], ptr [[V5]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], ptr [[V6]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], ptr [[V7]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vlsseg8e8_v_u8mf2_mu(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); -} - -// CHECK-RV64-LABEL: define 
dso_local void @test_vlsseg8e8_v_u8m1_mu
-// CHECK-RV64-SAME: (ptr noundef [[V0:%.*]], ptr noundef [[V1:%.*]], ptr noundef [[V2:%.*]], ptr noundef [[V3:%.*]], ptr noundef [[V4:%.*]], ptr noundef [[V5:%.*]], ptr noundef [[V6:%.*]], ptr noundef [[V7:%.*]], <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0]], <vscale x 8 x i8> [[MASKEDOFF1]], <vscale x 8 x i8> [[MASKEDOFF2]], <vscale x 8 x i8> [[MASKEDOFF3]], <vscale x 8 x i8> [[MASKEDOFF4]], <vscale x 8 x i8> [[MASKEDOFF5]], <vscale x 8 x i8> [[MASKEDOFF6]], <vscale x 8 x i8> [[MASKEDOFF7]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], ptr [[V0]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], ptr [[V1]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], ptr [[V2]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], ptr [[V3]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], ptr [[V4]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], ptr [[V5]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], ptr [[V6]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], ptr [[V7]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vlsseg8e8_v_u8m1_mu(vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_mu(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg8e8_v_i8mf8x8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_i8mf4x8_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.nxv2i8.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP16]]
+//
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_i8mf2x8_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.nxv4i8.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP16]]
+//
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg8e8_v_i8m1x8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , 
, , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_tum +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], 
[[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , 
, , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tum +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], 
[[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } 
[[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf8x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf4x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: 
[[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8mf2x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_u8m1x8_tumu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], 
[[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[MASKEDOFF_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: [[TMP16:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret { , , , , , , , } [[TMP16]] +// +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +} + +// CHECK-RV64-LABEL: define dso_local { , , , , , , , } @test_vlsseg8e8_v_i8mf8x8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF_TUPLE_COERCE0:%.*]], [[MASKEDOFF_TUPLE_COERCE1:%.*]], [[MASKEDOFF_TUPLE_COERCE2:%.*]], [[MASKEDOFF_TUPLE_COERCE3:%.*]], [[MASKEDOFF_TUPLE_COERCE4:%.*]], [[MASKEDOFF_TUPLE_COERCE5:%.*]], [[MASKEDOFF_TUPLE_COERCE6:%.*]], [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[MASKEDOFF_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[MASKEDOFF_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[MASKEDOFF_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[MASKEDOFF_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[MASKEDOFF_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[MASKEDOFF_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[MASKEDOFF_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_i8mf4x8_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP16]]
+//
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_i8mf2x8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP16]]
+//
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg8e8_v_i8m1x8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP16]]
+//
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @test_vlsseg8e8_v_u8mf8x8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP16]]
+//
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @test_vlsseg8e8_v_u8mf4x8_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg8.mask.nxv2i8.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP16]]
+//
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @test_vlsseg8e8_v_u8mf2x8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP16]]
+//
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @test_vlsseg8e8_v_u8m1x8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6:%.*]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], <vscale x 8 x i8> [[MASKEDOFF_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    [[TMP16:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP16]]
+//
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+}
--
GitLab
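
For reference, a minimal usage sketch of the tuple-type interface exercised by the tests above (not part of the patch; the wrapper name, buffer names, and scenario are hypothetical, and it assumes a toolchain built with this series, the V extension enabled, and `<riscv_vector.h>`):

```c
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical wrapper: strided segment load of 8 interleaved byte fields,
// one record every `record_stride` bytes. With the _mu (mask-undisturbed)
// policy tested above, elements whose mask bit is 0 keep the corresponding
// value from `maskedoff` instead of being loaded.
vuint8m1x8_t load_record_fields(vbool8_t mask, vuint8m1x8_t maskedoff,
                                const uint8_t *base, ptrdiff_t record_stride,
                                size_t n) {
  size_t vl = __riscv_vsetvl_e8m1(n);
  // Overloaded form, exactly as called by the tests in this patch; the result
  // is a single vuint8m1x8_t tuple rather than 8 separate output pointers.
  return __riscv_vlsseg8e8_mu(mask, maskedoff, base, record_stride, vl);
}
```

The tuple return type is the point of the change: the pre-tuple interface returned the 8 segments through separate `v0`..`v7` output pointer arguments, whereas the tuple interface packs them into one first-class value, which is what the `insertvalue`/`extractvalue` sequences in the CHECK lines above are coercing at the ABI boundary.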